Unverified Commit 74c9d27d authored by Hongzhi (Steve), Chen's avatar Hongzhi (Steve), Chen Committed by GitHub
Browse files

[Misc] Auto-format tests. (#5313)



* [Misc] Auto-format tests.

* more

---------
Co-authored-by: default avatarUbuntu <ubuntu@ip-172-31-28-63.ap-northeast-1.compute.internal>
parent 86193c26
import unittest
import backend as F
import numpy as np
import dgl
import dgl.ndarray as nd
import numpy as np
@unittest.skipIf(
......
......@@ -735,11 +735,7 @@ def _test_DefaultDataParser():
# string consists of non-numeric values
with tempfile.TemporaryDirectory() as test_dir:
csv_path = os.path.join(test_dir, "nodes.csv")
df = pd.DataFrame(
{
"label": ["a", "b", "c"],
}
)
df = pd.DataFrame({"label": ["a", "b", "c"]})
df.to_csv(csv_path, index=False)
dp = DefaultDataParser()
df = pd.read_csv(csv_path)
......@@ -752,11 +748,7 @@ def _test_DefaultDataParser():
# csv has index column which is ignored as it's unnamed
with tempfile.TemporaryDirectory() as test_dir:
csv_path = os.path.join(test_dir, "nodes.csv")
df = pd.DataFrame(
{
"label": [1, 2, 3],
}
)
df = pd.DataFrame({"label": [1, 2, 3]})
df.to_csv(csv_path)
dp = DefaultDataParser()
df = pd.read_csv(csv_path)
......@@ -1042,9 +1034,7 @@ def _test_load_edge_data_from_csv():
# required headers are missing
df = pd.DataFrame(
{
"src_id": np.random.randint(num_nodes, size=num_edges),
}
{"src_id": np.random.randint(num_nodes, size=num_edges)}
)
csv_path = os.path.join(test_dir, "edges.csv")
df.to_csv(csv_path, index=False)
......@@ -1056,9 +1046,7 @@ def _test_load_edge_data_from_csv():
expect_except = True
assert expect_except
df = pd.DataFrame(
{
"dst_id": np.random.randint(num_nodes, size=num_edges),
}
{"dst_id": np.random.randint(num_nodes, size=num_edges)}
)
csv_path = os.path.join(test_dir, "edges.csv")
df.to_csv(csv_path, index=False)
......
......@@ -4,12 +4,12 @@ import time
import unittest
import backend as F
import numpy as np
import pytest
import scipy as sp
import dgl
import dgl.ndarray as nd
import numpy as np
import pytest
import scipy as sp
from dgl import DGLGraph
from dgl.data.utils import load_labels, load_tensors, save_tensors
......
import unittest
import backend as F
from test_utils import parametrize_idtype
import dgl
from dgl.dataloading import (
NeighborSampler,
as_edge_prediction_sampler,
negative_sampler,
NeighborSampler,
)
from test_utils import parametrize_idtype
def create_test_graph(idtype):
......
This diff is collapsed.
......@@ -4,18 +4,18 @@ import unittest
from collections import Counter
import backend as F
import dgl
import dgl.function as fn
import networkx as nx
import numpy as np
import pytest
import scipy.sparse as ssp
import test_utils
from scipy.sparse import rand
from test_utils import get_cases, parametrize_idtype
import dgl
import dgl.function as fn
from dgl import DGLError
from dgl.ops import edge_softmax
from scipy.sparse import rand
from test_utils import get_cases, parametrize_idtype
edge_softmax_shapes = [(1,), (1, 3), (3, 4, 5)]
rfuncs = {"sum": fn.sum, "max": fn.max, "min": fn.min, "mean": fn.mean}
......
import unittest
import backend as F
import dgl
import numpy as np
import backend as F
import unittest
from test_utils import parametrize_idtype
def tree1(idtype):
"""Generate a tree
0
......@@ -19,10 +22,11 @@ def tree1(idtype):
g.add_edges(4, 1)
g.add_edges(1, 0)
g.add_edges(2, 0)
g.ndata['h'] = F.tensor([0, 1, 2, 3, 4])
g.edata['h'] = F.randn((4, 10))
g.ndata["h"] = F.tensor([0, 1, 2, 3, 4])
g.edata["h"] = F.randn((4, 10))
return g
def tree2(idtype):
"""Generate a tree
1
......@@ -38,10 +42,11 @@ def tree2(idtype):
g.add_edges(0, 4)
g.add_edges(4, 1)
g.add_edges(3, 1)
g.ndata['h'] = F.tensor([0, 1, 2, 3, 4])
g.edata['h'] = F.randn((4, 10))
g.ndata["h"] = F.tensor([0, 1, 2, 3, 4])
g.edata["h"] = F.randn((4, 10))
return g
@parametrize_idtype
def test_batch_unbatch(idtype):
t1 = tree1(idtype)
......@@ -55,10 +60,11 @@ def test_batch_unbatch(idtype):
assert F.allclose(bg.batch_num_edges(), F.tensor([4, 4]))
tt1, tt2 = dgl.unbatch(bg)
assert F.allclose(t1.ndata['h'], tt1.ndata['h'])
assert F.allclose(t1.edata['h'], tt1.edata['h'])
assert F.allclose(t2.ndata['h'], tt2.ndata['h'])
assert F.allclose(t2.edata['h'], tt2.edata['h'])
assert F.allclose(t1.ndata["h"], tt1.ndata["h"])
assert F.allclose(t1.edata["h"], tt1.edata["h"])
assert F.allclose(t2.ndata["h"], tt2.ndata["h"])
assert F.allclose(t2.edata["h"], tt2.edata["h"])
@parametrize_idtype
def test_batch_unbatch1(idtype):
......@@ -73,14 +79,18 @@ def test_batch_unbatch1(idtype):
assert F.allclose(b2.batch_num_edges(), F.tensor([4, 4, 4]))
s1, s2, s3 = dgl.unbatch(b2)
assert F.allclose(t2.ndata['h'], s1.ndata['h'])
assert F.allclose(t2.edata['h'], s1.edata['h'])
assert F.allclose(t1.ndata['h'], s2.ndata['h'])
assert F.allclose(t1.edata['h'], s2.edata['h'])
assert F.allclose(t2.ndata['h'], s3.ndata['h'])
assert F.allclose(t2.edata['h'], s3.edata['h'])
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support inplace update")
assert F.allclose(t2.ndata["h"], s1.ndata["h"])
assert F.allclose(t2.edata["h"], s1.edata["h"])
assert F.allclose(t1.ndata["h"], s2.ndata["h"])
assert F.allclose(t1.edata["h"], s2.edata["h"])
assert F.allclose(t2.ndata["h"], s3.ndata["h"])
assert F.allclose(t2.edata["h"], s3.edata["h"])
@unittest.skipIf(
dgl.backend.backend_name == "tensorflow",
reason="TF doesn't support inplace update",
)
@parametrize_idtype
def test_batch_unbatch_frame(idtype):
"""Test module of node/edge frames of batched/unbatched DGLGraphs.
......@@ -93,30 +103,31 @@ def test_batch_unbatch_frame(idtype):
N2 = t2.number_of_nodes()
E2 = t2.number_of_edges()
D = 10
t1.ndata['h'] = F.randn((N1, D))
t1.edata['h'] = F.randn((E1, D))
t2.ndata['h'] = F.randn((N2, D))
t2.edata['h'] = F.randn((E2, D))
t1.ndata["h"] = F.randn((N1, D))
t1.edata["h"] = F.randn((E1, D))
t2.ndata["h"] = F.randn((N2, D))
t2.edata["h"] = F.randn((E2, D))
b1 = dgl.batch([t1, t2])
b2 = dgl.batch([t2])
b1.ndata['h'][:N1] = F.zeros((N1, D))
b1.edata['h'][:E1] = F.zeros((E1, D))
b2.ndata['h'][:N2] = F.zeros((N2, D))
b2.edata['h'][:E2] = F.zeros((E2, D))
assert not F.allclose(t1.ndata['h'], F.zeros((N1, D)))
assert not F.allclose(t1.edata['h'], F.zeros((E1, D)))
assert not F.allclose(t2.ndata['h'], F.zeros((N2, D)))
assert not F.allclose(t2.edata['h'], F.zeros((E2, D)))
b1.ndata["h"][:N1] = F.zeros((N1, D))
b1.edata["h"][:E1] = F.zeros((E1, D))
b2.ndata["h"][:N2] = F.zeros((N2, D))
b2.edata["h"][:E2] = F.zeros((E2, D))
assert not F.allclose(t1.ndata["h"], F.zeros((N1, D)))
assert not F.allclose(t1.edata["h"], F.zeros((E1, D)))
assert not F.allclose(t2.ndata["h"], F.zeros((N2, D)))
assert not F.allclose(t2.edata["h"], F.zeros((E2, D)))
g1, g2 = dgl.unbatch(b1)
_g2, = dgl.unbatch(b2)
assert F.allclose(g1.ndata['h'], F.zeros((N1, D)))
assert F.allclose(g1.edata['h'], F.zeros((E1, D)))
assert F.allclose(g2.ndata['h'], t2.ndata['h'])
assert F.allclose(g2.edata['h'], t2.edata['h'])
assert F.allclose(_g2.ndata['h'], F.zeros((N2, D)))
assert F.allclose(_g2.edata['h'], F.zeros((E2, D)))
(_g2,) = dgl.unbatch(b2)
assert F.allclose(g1.ndata["h"], F.zeros((N1, D)))
assert F.allclose(g1.edata["h"], F.zeros((E1, D)))
assert F.allclose(g2.ndata["h"], t2.ndata["h"])
assert F.allclose(g2.edata["h"], t2.edata["h"])
assert F.allclose(_g2.ndata["h"], F.zeros((N2, D)))
assert F.allclose(_g2.edata["h"], F.zeros((E2, D)))
@parametrize_idtype
def test_batch_unbatch2(idtype):
......@@ -128,10 +139,11 @@ def test_batch_unbatch2(idtype):
b.add_nodes(3)
b.add_edges(0, [1, 2])
c = dgl.batch([a, b])
c.ndata['h'] = F.ones((7, 1))
c.edata['w'] = F.ones((5, 1))
assert F.allclose(c.ndata['h'], F.ones((7, 1)))
assert F.allclose(c.edata['w'], F.ones((5, 1)))
c.ndata["h"] = F.ones((7, 1))
c.edata["w"] = F.ones((5, 1))
assert F.allclose(c.ndata["h"], F.ones((7, 1)))
assert F.allclose(c.edata["w"], F.ones((5, 1)))
@parametrize_idtype
def test_batch_send_and_recv(idtype):
......@@ -139,16 +151,17 @@ def test_batch_send_and_recv(idtype):
t2 = tree2(idtype)
bg = dgl.batch([t1, t2])
_mfunc = lambda edges: {'m' : edges.src['h']}
_rfunc = lambda nodes: {'h' : F.sum(nodes.mailbox['m'], 1)}
_mfunc = lambda edges: {"m": edges.src["h"]}
_rfunc = lambda nodes: {"h": F.sum(nodes.mailbox["m"], 1)}
u = [3, 4, 2 + 5, 0 + 5]
v = [1, 1, 4 + 5, 4 + 5]
bg.send_and_recv((u, v), _mfunc, _rfunc)
t1, t2 = dgl.unbatch(bg)
assert F.asnumpy(t1.ndata['h'][1]) == 7
assert F.asnumpy(t2.ndata['h'][4]) == 2
assert F.asnumpy(t1.ndata["h"][1]) == 7
assert F.asnumpy(t2.ndata["h"][4]) == 2
@parametrize_idtype
def test_batch_propagate(idtype):
......@@ -156,8 +169,8 @@ def test_batch_propagate(idtype):
t2 = tree2(idtype)
bg = dgl.batch([t1, t2])
_mfunc = lambda edges: {'m' : edges.src['h']}
_rfunc = lambda nodes: {'h' : F.sum(nodes.mailbox['m'], 1)}
_mfunc = lambda edges: {"m": edges.src["h"]}
_rfunc = lambda nodes: {"h": F.sum(nodes.mailbox["m"], 1)}
# get leaves.
order = []
......@@ -175,8 +188,9 @@ def test_batch_propagate(idtype):
bg.prop_edges(order, _mfunc, _rfunc)
t1, t2 = dgl.unbatch(bg)
assert F.asnumpy(t1.ndata['h'][0]) == 9
assert F.asnumpy(t2.ndata['h'][1]) == 5
assert F.asnumpy(t1.ndata["h"][0]) == 9
assert F.asnumpy(t2.ndata["h"][1]) == 5
@parametrize_idtype
def test_batched_edge_ordering(idtype):
......@@ -184,17 +198,18 @@ def test_batched_edge_ordering(idtype):
g1.add_nodes(6)
g1.add_edges([4, 4, 2, 2, 0], [5, 3, 3, 1, 1])
e1 = F.randn((5, 10))
g1.edata['h'] = e1
g1.edata["h"] = e1
g2 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g2.add_nodes(6)
g2.add_edges([0, 1 ,2 ,5, 4 ,5], [1, 2, 3, 4, 3, 0])
g2.add_edges([0, 1, 2, 5, 4, 5], [1, 2, 3, 4, 3, 0])
e2 = F.randn((6, 10))
g2.edata['h'] = e2
g2.edata["h"] = e2
g = dgl.batch([g1, g2])
r1 = g.edata['h'][g.edge_ids(4, 5)]
r2 = g1.edata['h'][g1.edge_ids(4, 5)]
r1 = g.edata["h"][g.edge_ids(4, 5)]
r2 = g1.edata["h"][g1.edge_ids(4, 5)]
assert F.array_equal(r1, r2)
@parametrize_idtype
def test_batch_no_edge(idtype):
g1 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
......@@ -202,11 +217,12 @@ def test_batch_no_edge(idtype):
g1.add_edges([4, 4, 2, 2, 0], [5, 3, 3, 1, 1])
g2 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g2.add_nodes(6)
g2.add_edges([0, 1, 2, 5, 4, 5], [1 ,2 ,3, 4, 3, 0])
g2.add_edges([0, 1, 2, 5, 4, 5], [1, 2, 3, 4, 3, 0])
g3 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g3.add_nodes(1) # no edges
g = dgl.batch([g1, g3, g2]) # should not throw an error
@parametrize_idtype
def test_batch_keeps_empty_data(idtype):
g1 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
......@@ -219,6 +235,7 @@ def test_batch_keeps_empty_data(idtype):
assert "nh" in g.ndata
assert "eh" in g.edata
def _get_subgraph_batch_info(keys, induced_indices_arr, batch_num_objs):
"""Internal function to compute batch information for subgraphs.
Parameters
......@@ -235,12 +252,16 @@ def _get_subgraph_batch_info(keys, induced_indices_arr, batch_num_objs):
A dictionary mapping all node/edge type keys to the ``batch_num_objs``
array of corresponding graph.
"""
bucket_offset = np.expand_dims(np.cumsum(F.asnumpy(batch_num_objs), 0), -1) # (num_bkts, 1)
bucket_offset = np.expand_dims(
np.cumsum(F.asnumpy(batch_num_objs), 0), -1
) # (num_bkts, 1)
ret = {}
for key, induced_indices in zip(keys, induced_indices_arr):
# NOTE(Zihao): this implementation is not efficient and we can replace it with
# binary search in the future.
induced_indices = np.expand_dims(F.asnumpy(induced_indices), 0) # (1, num_nodes)
induced_indices = np.expand_dims(
F.asnumpy(induced_indices), 0
) # (1, num_nodes)
new_offset = np.sum((induced_indices < bucket_offset), 1) # (num_bkts,)
# start_offset = [0] + [new_offset[i-1] for i in range(1, n_bkts)]
start_offset = np.concatenate([np.zeros((1,)), new_offset[:-1]], 0)
......@@ -248,6 +269,7 @@ def _get_subgraph_batch_info(keys, induced_indices_arr, batch_num_objs):
ret[key] = F.tensor(new_batch_num_objs, dtype=F.dtype(batch_num_objs))
return ret
@parametrize_idtype
def test_set_batch_info(idtype):
ctx = F.ctx()
......@@ -260,10 +282,14 @@ def test_set_batch_info(idtype):
# test homogeneous node subgraph
sg_n = dgl.node_subgraph(bg, list(range(10, 20)) + list(range(50, 60)))
induced_nodes = sg_n.ndata['_ID']
induced_edges = sg_n.edata['_ID']
new_batch_num_nodes = _get_subgraph_batch_info(bg.ntypes, [induced_nodes], batch_num_nodes)
new_batch_num_edges = _get_subgraph_batch_info(bg.canonical_etypes, [induced_edges], batch_num_edges)
induced_nodes = sg_n.ndata["_ID"]
induced_edges = sg_n.edata["_ID"]
new_batch_num_nodes = _get_subgraph_batch_info(
bg.ntypes, [induced_nodes], batch_num_nodes
)
new_batch_num_edges = _get_subgraph_batch_info(
bg.canonical_etypes, [induced_edges], batch_num_edges
)
sg_n.set_batch_num_nodes(new_batch_num_nodes)
sg_n.set_batch_num_edges(new_batch_num_edges)
subg_n1, subg_n2 = dgl.unbatch(sg_n)
......@@ -273,11 +299,17 @@ def test_set_batch_info(idtype):
assert subg_n2.num_edges() == subg2.num_edges()
# test homogeneous edge subgraph
sg_e = dgl.edge_subgraph(bg, list(range(40, 70)) + list(range(150, 200)), relabel_nodes=False)
sg_e = dgl.edge_subgraph(
bg, list(range(40, 70)) + list(range(150, 200)), relabel_nodes=False
)
induced_nodes = F.arange(0, bg.num_nodes(), idtype)
induced_edges = sg_e.edata['_ID']
new_batch_num_nodes = _get_subgraph_batch_info(bg.ntypes, [induced_nodes], batch_num_nodes)
new_batch_num_edges = _get_subgraph_batch_info(bg.canonical_etypes, [induced_edges], batch_num_edges)
induced_edges = sg_e.edata["_ID"]
new_batch_num_nodes = _get_subgraph_batch_info(
bg.ntypes, [induced_nodes], batch_num_nodes
)
new_batch_num_edges = _get_subgraph_batch_info(
bg.canonical_etypes, [induced_edges], batch_num_edges
)
sg_e.set_batch_num_nodes(new_batch_num_nodes)
sg_e.set_batch_num_edges(new_batch_num_edges)
subg_e1, subg_e2 = dgl.unbatch(sg_e)
......@@ -287,15 +319,14 @@ def test_set_batch_info(idtype):
assert subg_e2.num_nodes() == subg2.num_nodes()
if __name__ == '__main__':
#test_batch_unbatch()
#test_batch_unbatch1()
#test_batch_unbatch_frame()
#test_batch_unbatch2()
#test_batched_edge_ordering()
#test_batch_send_then_recv()
#test_batch_send_and_recv()
#test_batch_propagate()
#test_batch_no_edge()
if __name__ == "__main__":
# test_batch_unbatch()
# test_batch_unbatch1()
# test_batch_unbatch_frame()
# test_batch_unbatch2()
# test_batched_edge_ordering()
# test_batch_send_then_recv()
# test_batch_send_and_recv()
# test_batch_propagate()
# test_batch_no_edge()
test_set_batch_info(F.int32)
......@@ -2,10 +2,10 @@ import os
import unittest
import backend as F
import numpy as np
import pytest
import dgl
import numpy as np
import pytest
@unittest.skipIf(os.name == "nt", reason="Cython only works on linux")
......
......@@ -2,12 +2,12 @@ import pickle
import unittest
import backend as F
import numpy as np
from test_utils import parametrize_idtype
import dgl
import dgl.ndarray as nd
import numpy as np
from dgl.frame import Column
from test_utils import parametrize_idtype
def test_column_subcolumn():
......
import unittest
import backend as F
import numpy as np
import dgl
import numpy as np
@unittest.skipIf(
......
......@@ -4,18 +4,18 @@ from collections import Counter
from itertools import product
import backend as F
import dgl
import dgl.function as fn
import networkx as nx
import numpy as np
import pytest
import scipy.sparse as ssp
import test_utils
from dgl import DGLError
from scipy.sparse import rand
from test_utils import get_cases, parametrize_idtype
import dgl
import dgl.function as fn
from dgl import DGLError
rfuncs = {"sum": fn.sum, "max": fn.max, "min": fn.min, "mean": fn.mean}
fill_value = {"sum": 0, "max": float("-inf")}
feat_size = 2
......@@ -51,7 +51,6 @@ def create_test_heterograph(idtype):
@parametrize_idtype
def test_unary_copy_u(idtype):
def _test(mfunc):
g = create_test_heterograph(idtype)
x1 = F.randn((g.num_nodes("user"), feat_size))
......@@ -108,7 +107,6 @@ def test_unary_copy_u(idtype):
@parametrize_idtype
def test_unary_copy_e(idtype):
def _test(mfunc):
g = create_test_heterograph(idtype)
feat_size = 2
......@@ -168,7 +166,6 @@ def test_unary_copy_e(idtype):
@parametrize_idtype
def test_binary_op(idtype):
def _test(lhs, rhs, binary_op):
g = create_test_heterograph(idtype)
n1 = F.randn((g.num_nodes("user"), feat_size))
......@@ -237,6 +234,7 @@ def test_binary_op(idtype):
loss = F.sum(r2.view(-1), 0)
F.backward(loss)
n_grad2 = F.grad(g.nodes["game"].data["h"])
# correctness check
def _print_error(a, b):
for i, (x, y) in enumerate(
......
from itertools import product
import backend as F
import dgl
import dgl.function as fn
import networkx as nx
import numpy as np
import backend as F
from itertools import product
from test_utils import parametrize_idtype, get_cases
import pytest
from test_utils import get_cases, parametrize_idtype
def udf_copy_src(edges):
return {'m': edges.src['u']}
return {"m": edges.src["u"]}
def udf_copy_edge(edges):
return {'m': edges.data['e']}
return {"m": edges.data["e"]}
def udf_mean(nodes):
return {'r2': F.mean(nodes.mailbox['m'], 1)}
return {"r2": F.mean(nodes.mailbox["m"], 1)}
def udf_sum(nodes):
return {'r2': F.sum(nodes.mailbox['m'], 1)}
return {"r2": F.sum(nodes.mailbox["m"], 1)}
def udf_max(nodes):
return {'r2': F.max(nodes.mailbox['m'], 1)}
return {"r2": F.max(nodes.mailbox["m"], 1)}
D1 = 5
D2 = 3
D3 = 4
D4 = 10 # NOTE(xiang): used to dot feature vector
builtin = {'sum': fn.sum, 'max': fn.max, 'mean': fn.mean}
udf_reduce = {'sum': udf_sum, 'max': udf_max, 'mean': udf_mean}
fill_value = {'sum': 0, 'max': float("-inf")}
builtin = {"sum": fn.sum, "max": fn.max, "mean": fn.mean}
udf_reduce = {"sum": udf_sum, "max": udf_max, "mean": udf_mean}
fill_value = {"sum": 0, "max": float("-inf")}
def generate_feature(g, broadcast='none', binary_op='none'):
def generate_feature(g, broadcast="none", binary_op="none"):
"""Create graph with src, edge, dst feature. broadcast can be 'u',
'e', 'v', 'none'
"""
np.random.seed(31)
nv = g.number_of_nodes()
ne = g.number_of_edges()
if binary_op == 'dot':
if broadcast == 'e':
if binary_op == "dot":
if broadcast == "e":
u = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3, D4)))
e = F.tensor(np.random.uniform(-1, 1, (ne, D2, 1, D4)))
v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3, D4)))
elif broadcast == 'u':
elif broadcast == "u":
u = F.tensor(np.random.uniform(-1, 1, (nv, D2, 1, D4)))
e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3, D4)))
v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3, D4)))
elif broadcast == 'v':
elif broadcast == "v":
u = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3, D4)))
e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3, D4)))
v = F.tensor(np.random.uniform(-1, 1, (nv, D2, 1, D4)))
......@@ -57,15 +64,15 @@ def generate_feature(g, broadcast='none', binary_op='none'):
e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3, D4)))
v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3, D4)))
else:
if broadcast == 'e':
if broadcast == "e":
u = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
e = F.tensor(np.random.uniform(-1, 1, (ne, D2, 1)))
v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
elif broadcast == 'u':
elif broadcast == "u":
u = F.tensor(np.random.uniform(-1, 1, (nv, D2, 1)))
e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3)))
v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
elif broadcast == 'v':
elif broadcast == "v":
u = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3)))
v = F.tensor(np.random.uniform(-1, 1, (nv, D2, 1)))
......@@ -73,7 +80,11 @@ def generate_feature(g, broadcast='none', binary_op='none'):
u = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3)))
v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
return F.astype(u, F.float32), F.astype(v, F.float32), F.astype(e, F.float32)
return (
F.astype(u, F.float32),
F.astype(v, F.float32),
F.astype(e, F.float32),
)
def test_copy_src_reduce():
......@@ -83,60 +94,65 @@ def test_copy_src_reduce():
# https://github.com/dmlc/dgl/issues/761
g.add_edges(g.nodes(), g.nodes())
g = g.to(F.ctx())
hu, hv, he = generate_feature(g, 'none', 'none')
hu, hv, he = generate_feature(g, "none", "none")
if partial:
nid = F.tensor(list(range(0, 100, 2)), g.idtype)
g.ndata['u'] = F.attach_grad(F.clone(hu))
g.ndata['v'] = F.attach_grad(F.clone(hv))
g.edata['e'] = F.attach_grad(F.clone(he))
g.ndata["u"] = F.attach_grad(F.clone(hu))
g.ndata["v"] = F.attach_grad(F.clone(hv))
g.edata["e"] = F.attach_grad(F.clone(he))
with F.record_grad():
if partial:
g.pull(nid, fn.copy_u(u='u', out='m'),
builtin[red](msg='m', out='r1'))
g.pull(
nid,
fn.copy_u(u="u", out="m"),
builtin[red](msg="m", out="r1"),
)
else:
g.update_all(fn.copy_u(u='u', out='m'),
builtin[red](msg='m', out='r1'))
r1 = g.ndata['r1']
g.update_all(
fn.copy_u(u="u", out="m"), builtin[red](msg="m", out="r1")
)
r1 = g.ndata["r1"]
F.backward(F.reduce_sum(r1))
n_grad1 = F.grad(g.ndata['u'])
n_grad1 = F.grad(g.ndata["u"])
# reset grad
g.ndata['u'] = F.attach_grad(F.clone(hu))
g.ndata['v'] = F.attach_grad(F.clone(hv))
g.edata['e'] = F.attach_grad(F.clone(he))
g.ndata["u"] = F.attach_grad(F.clone(hu))
g.ndata["v"] = F.attach_grad(F.clone(hv))
g.edata["e"] = F.attach_grad(F.clone(he))
with F.record_grad():
if partial:
g.pull(nid, udf_copy_src, udf_reduce[red])
else:
g.update_all(udf_copy_src, udf_reduce[red])
r2 = g.ndata['r2']
r2 = g.ndata["r2"]
F.backward(F.reduce_sum(r2))
n_grad2 = F.grad(g.ndata['u'])
n_grad2 = F.grad(g.ndata["u"])
def _print_error(a, b):
print("ERROR: Test copy_src_{} partial: {}".
format(red, partial))
for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())):
print("ERROR: Test copy_src_{} partial: {}".format(red, partial))
for i, (x, y) in enumerate(
zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())
):
if not np.allclose(x, y):
print('@{} {} v.s. {}'.format(i, x, y))
print("@{} {} v.s. {}".format(i, x, y))
if not F.allclose(r1, r2):
_print_error(r1, r2)
assert F.allclose(r1, r2)
if not F.allclose(n_grad1, n_grad2):
print('node grad')
print("node grad")
_print_error(n_grad1, n_grad2)
assert(F.allclose(n_grad1, n_grad2))
assert F.allclose(n_grad1, n_grad2)
_test('sum', False)
_test('max', False)
_test('mean', False)
_test('sum', True)
_test('max', True)
_test('mean', True)
_test("sum", False)
_test("max", False)
_test("mean", False)
_test("sum", True)
_test("max", True)
_test("mean", True)
def test_copy_edge_reduce():
......@@ -145,80 +161,85 @@ def test_copy_edge_reduce():
# NOTE(zihao): add self-loop to avoid zero-degree nodes.
g.add_edges(g.nodes(), g.nodes())
g = g.to(F.ctx())
hu, hv, he = generate_feature(g, 'none', 'none')
hu, hv, he = generate_feature(g, "none", "none")
if partial:
nid = F.tensor(list(range(0, 100, 2)), g.idtype)
g.ndata['u'] = F.attach_grad(F.clone(hu))
g.ndata['v'] = F.attach_grad(F.clone(hv))
g.edata['e'] = F.attach_grad(F.clone(he))
g.ndata["u"] = F.attach_grad(F.clone(hu))
g.ndata["v"] = F.attach_grad(F.clone(hv))
g.edata["e"] = F.attach_grad(F.clone(he))
with F.record_grad():
if partial:
g.pull(nid, fn.copy_e(e='e', out='m'),
builtin[red](msg='m', out='r1'))
g.pull(
nid,
fn.copy_e(e="e", out="m"),
builtin[red](msg="m", out="r1"),
)
else:
g.update_all(fn.copy_e(e='e', out='m'),
builtin[red](msg='m', out='r1'))
r1 = g.ndata['r1']
g.update_all(
fn.copy_e(e="e", out="m"), builtin[red](msg="m", out="r1")
)
r1 = g.ndata["r1"]
F.backward(F.reduce_sum(r1))
e_grad1 = F.grad(g.edata['e'])
e_grad1 = F.grad(g.edata["e"])
# reset grad
g.ndata['u'] = F.attach_grad(F.clone(hu))
g.ndata['v'] = F.attach_grad(F.clone(hv))
g.edata['e'] = F.attach_grad(F.clone(he))
g.ndata["u"] = F.attach_grad(F.clone(hu))
g.ndata["v"] = F.attach_grad(F.clone(hv))
g.edata["e"] = F.attach_grad(F.clone(he))
with F.record_grad():
if partial:
g.pull(nid, udf_copy_edge, udf_reduce[red])
else:
g.update_all(udf_copy_edge, udf_reduce[red])
r2 = g.ndata['r2']
r2 = g.ndata["r2"]
F.backward(F.reduce_sum(r2))
e_grad2 = F.grad(g.edata['e'])
e_grad2 = F.grad(g.edata["e"])
def _print_error(a, b):
print("ERROR: Test copy_edge_{} partial: {}".
format(red, partial))
print("ERROR: Test copy_edge_{} partial: {}".format(red, partial))
return
for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())):
for i, (x, y) in enumerate(
zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())
):
if not np.allclose(x, y):
print('@{} {} v.s. {}'.format(i, x, y))
print("@{} {} v.s. {}".format(i, x, y))
if not F.allclose(r1, r2):
_print_error(r1, r2)
assert F.allclose(r1, r2)
if not F.allclose(e_grad1, e_grad2):
print('edge gradient')
print("edge gradient")
_print_error(e_grad1, e_grad2)
assert(F.allclose(e_grad1, e_grad2))
assert F.allclose(e_grad1, e_grad2)
_test('sum', False)
_test('max', False)
_test('mean', False)
_test('sum', True)
_test('max', True)
_test('mean', True)
_test("sum", False)
_test("max", False)
_test("mean", False)
_test("sum", True)
_test("max", True)
_test("mean", True)
def test_all_binary_builtins():
def _test(g, lhs, rhs, binary_op, reducer, partial, nid, broadcast='none'):
def _test(g, lhs, rhs, binary_op, reducer, partial, nid, broadcast="none"):
# initialize node/edge features with uniform(-1, 1)
hu, hv, he = generate_feature(g, broadcast, binary_op)
if binary_op == 'div':
if binary_op == "div":
# op = div
# lhs range: [-1, 1]
# rhs range: [1, 2]
# result range: [-1, 1]
if rhs == 'u':
if rhs == "u":
hu = (hu + 3) / 2
elif rhs == 'v':
elif rhs == "v":
hv = (hv + 3) / 2
elif rhs == 'e':
elif rhs == "e":
he = (he + 3) / 2
if binary_op == 'add' or binary_op == 'sub':
if binary_op == "add" or binary_op == "sub":
# op = add, sub
# lhs range: [-1/2, 1/2]
# rhs range: [-1/2, 1/2]
......@@ -227,9 +248,9 @@ def test_all_binary_builtins():
hv = hv / 2
he = he / 2
g.ndata['u'] = F.attach_grad(F.clone(hu))
g.ndata['v'] = F.attach_grad(F.clone(hv))
g.edata['e'] = F.attach_grad(F.clone(he))
g.ndata["u"] = F.attach_grad(F.clone(hu))
g.ndata["v"] = F.attach_grad(F.clone(hv))
g.edata["e"] = F.attach_grad(F.clone(he))
builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs)
builtin_msg = getattr(fn, builtin_msg_name)
......@@ -245,18 +266,18 @@ def test_all_binary_builtins():
with F.record_grad():
if partial:
g.pull(nid, builtin_msg(lhs, rhs, 'm'), builtin_red('m', 'r1'))
g.pull(nid, builtin_msg(lhs, rhs, "m"), builtin_red("m", "r1"))
else:
g.update_all(builtin_msg(lhs, rhs, 'm'), builtin_red('m', 'r1'))
r1 = g.ndata.pop('r1')
g.update_all(builtin_msg(lhs, rhs, "m"), builtin_red("m", "r1"))
r1 = g.ndata.pop("r1")
F.backward(F.reduce_sum(r1))
lhs_grad_1 = F.grad(target_feature_switch(g, lhs))
rhs_grad_1 = F.grad(target_feature_switch(g, rhs))
# reset grad
g.ndata['u'] = F.attach_grad(F.clone(hu))
g.ndata['v'] = F.attach_grad(F.clone(hv))
g.edata['e'] = F.attach_grad(F.clone(he))
g.ndata["u"] = F.attach_grad(F.clone(hu))
g.ndata["v"] = F.attach_grad(F.clone(hv))
g.edata["e"] = F.attach_grad(F.clone(he))
def target_switch(edges, target):
if target == "u":
......@@ -266,7 +287,7 @@ def test_all_binary_builtins():
elif target == "e":
return edges.data
else:
assert(0), "Unknown target {}".format(target)
assert 0, "Unknown target {}".format(target)
def mfunc(edges):
op = getattr(F, binary_op)
......@@ -282,15 +303,15 @@ def test_all_binary_builtins():
def rfunc(nodes):
op = getattr(F, reducer)
return {"r2": op(nodes.mailbox['m'], 1)}
return {"r2": op(nodes.mailbox["m"], 1)}
with F.record_grad():
if partial:
g.pull(nid, mfunc, rfunc)
else:
g.update_all(mfunc, rfunc)
r2 = g.ndata.pop('r2')
F.backward(F.reduce_sum(r2), F.tensor([1.]))
r2 = g.ndata.pop("r2")
F.backward(F.reduce_sum(r2), F.tensor([1.0]))
lhs_grad_2 = F.grad(target_feature_switch(g, lhs))
rhs_grad_2 = F.grad(target_feature_switch(g, rhs))
......@@ -298,27 +319,32 @@ def test_all_binary_builtins():
atol = 1e-4
def _print_error(a, b):
print("ERROR: Test {}_{}_{}_{} broadcast: {} partial: {}".
format(lhs, binary_op, rhs, reducer, broadcast, partial))
print(
"ERROR: Test {}_{}_{}_{} broadcast: {} partial: {}".format(
lhs, binary_op, rhs, reducer, broadcast, partial
)
)
return
if lhs == 'u':
if lhs == "u":
lhs_data = hu
elif lhs == 'v':
elif lhs == "v":
lhs_data = hv
elif lhs == 'e':
elif lhs == "e":
lhs_data = he
if rhs == 'u':
if rhs == "u":
rhs_data = hu
elif rhs == 'v':
elif rhs == "v":
rhs_data = hv
elif rhs == 'e':
elif rhs == "e":
rhs_data = he
print("lhs", F.asnumpy(lhs_data).tolist())
print("rhs", F.asnumpy(rhs_data).tolist())
for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())):
for i, (x, y) in enumerate(
zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())
):
if not np.allclose(x, y, rtol, atol):
print('@{} {} v.s. {}'.format(i, x, y))
print("@{} {} v.s. {}".format(i, x, y))
if not F.allclose(r1, r2, rtol, atol):
_print_error(r1, r2)
......@@ -327,12 +353,12 @@ def test_all_binary_builtins():
if not F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol):
print("left grad")
_print_error(lhs_grad_1, lhs_grad_2)
assert(F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol))
assert F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol)
if not F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol):
print("right grad")
_print_error(rhs_grad_1, rhs_grad_2)
assert(F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol))
assert F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol)
g = dgl.DGLGraph()
g.add_nodes(20)
......@@ -359,20 +385,30 @@ def test_all_binary_builtins():
for broadcast in ["none", lhs, rhs]:
for partial in [False, True]:
print(lhs, rhs, binary_op, reducer, broadcast, partial)
_test(g, lhs, rhs, binary_op, reducer, partial, nid,
broadcast=broadcast)
_test(
g,
lhs,
rhs,
binary_op,
reducer,
partial,
nid,
broadcast=broadcast,
)
@parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo-zero-degree']))
@pytest.mark.parametrize("g", get_cases(["homo-zero-degree"]))
def test_mean_zero_degree(g, idtype):
g = g.astype(idtype).to(F.ctx())
g.ndata['h'] = F.ones((g.number_of_nodes(), 3))
g.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'x'))
g.ndata["h"] = F.ones((g.number_of_nodes(), 3))
g.update_all(fn.copy_u("h", "m"), fn.mean("m", "x"))
deg = F.asnumpy(g.in_degrees())
v = F.tensor(np.where(deg == 0)[0])
assert F.allclose(F.gather_row(g.ndata['x'], v), F.zeros((len(v), 3)))
assert F.allclose(F.gather_row(g.ndata["x"], v), F.zeros((len(v), 3)))
if __name__ == '__main__':
if __name__ == "__main__":
test_copy_src_reduce()
test_copy_edge_reduce()
test_all_binary_builtins()
import math
import numbers
import backend as F
import dgl
import networkx as nx
import numpy as np
import pytest
import scipy.sparse as sp
import networkx as nx
import dgl
import backend as F
from dgl import DGLError
import pytest
# graph generation: a random graph with 10 nodes
# and 20 edges.
......@@ -22,6 +25,7 @@ def edge_pair_input(sort=False):
dst = [9, 6, 3, 9, 4, 4, 9, 9, 1, 8, 3, 2, 8, 1, 5, 7, 3, 2, 6, 5]
return src, dst
def nx_input():
g = nx.DiGraph()
src, dst = edge_pair_input()
......@@ -29,22 +33,26 @@ def nx_input():
g.add_edge(*e, id=i)
return g
def elist_input():
src, dst = edge_pair_input()
return list(zip(src, dst))
def scipy_coo_input():
src, dst = edge_pair_input()
return sp.coo_matrix((np.ones((20,)), (src, dst)), shape=(10,10))
return sp.coo_matrix((np.ones((20,)), (src, dst)), shape=(10, 10))
def scipy_csr_input():
src, dst = edge_pair_input()
csr = sp.coo_matrix((np.ones((20,)), (src, dst)), shape=(10,10)).tocsr()
csr = sp.coo_matrix((np.ones((20,)), (src, dst)), shape=(10, 10)).tocsr()
csr.sort_indices()
# src = [0 0 0 1 1 2 2 3 3 4 4 4 4 5 5 6 7 7 7 9]
# dst = [4 6 9 3 5 3 7 5 8 1 3 4 9 1 9 6 2 8 9 2]
return csr
def gen_by_mutation():
g = dgl.DGLGraph()
src, dst = edge_pair_input()
......@@ -52,9 +60,11 @@ def gen_by_mutation():
g.add_edges(src, dst)
return g
def gen_from_data(data, readonly, sort):
return dgl.DGLGraph(data, readonly=readonly, sort_csr=True)
def test_query():
def _test_one(g):
assert g.number_of_nodes() == 10
......@@ -63,45 +73,63 @@ def test_query():
for i in range(10):
assert g.has_nodes(i)
assert not g.has_nodes(11)
assert F.allclose(g.has_nodes([0,2,10,11]), F.tensor([1,1,0,0]))
assert F.allclose(g.has_nodes([0, 2, 10, 11]), F.tensor([1, 1, 0, 0]))
src, dst = edge_pair_input()
for u, v in zip(src, dst):
assert g.has_edges_between(u, v)
assert not g.has_edges_between(0, 0)
assert F.allclose(g.has_edges_between([0, 0, 3], [0, 9, 8]), F.tensor([0,1,1]))
assert set(F.asnumpy(g.predecessors(9))) == set([0,5,7,4])
assert set(F.asnumpy(g.successors(2))) == set([7,3])
assert F.allclose(
g.has_edges_between([0, 0, 3], [0, 9, 8]), F.tensor([0, 1, 1])
)
assert set(F.asnumpy(g.predecessors(9))) == set([0, 5, 7, 4])
assert set(F.asnumpy(g.successors(2))) == set([7, 3])
assert g.edge_ids(4,4) == 5
assert F.allclose(g.edge_ids([4,0], [4,9]), F.tensor([5,0]))
assert g.edge_ids(4, 4) == 5
assert F.allclose(g.edge_ids([4, 0], [4, 9]), F.tensor([5, 0]))
src, dst = g.find_edges([3, 6, 5])
assert F.allclose(src, F.tensor([5, 7, 4]))
assert F.allclose(dst, F.tensor([9, 9, 4]))
src, dst, eid = g.in_edges(9, form='all')
src, dst, eid = g.in_edges(9, form="all")
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
assert set(tup) == set([(0,9,0),(5,9,3),(7,9,6),(4,9,7)])
src, dst, eid = g.in_edges([9,0,8], form='all') # test node#0 has no in edges
assert set(tup) == set([(0, 9, 0), (5, 9, 3), (7, 9, 6), (4, 9, 7)])
src, dst, eid = g.in_edges(
[9, 0, 8], form="all"
) # test node#0 has no in edges
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
assert set(tup) == set([(0,9,0),(5,9,3),(7,9,6),(4,9,7),(3,8,9),(7,8,12)])
assert set(tup) == set(
[(0, 9, 0), (5, 9, 3), (7, 9, 6), (4, 9, 7), (3, 8, 9), (7, 8, 12)]
)
src, dst, eid = g.out_edges(0, form='all')
src, dst, eid = g.out_edges(0, form="all")
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
assert set(tup) == set([(0,9,0),(0,6,1),(0,4,4)])
src, dst, eid = g.out_edges([0,4,8], form='all') # test node#8 has no out edges
assert set(tup) == set([(0, 9, 0), (0, 6, 1), (0, 4, 4)])
src, dst, eid = g.out_edges(
[0, 4, 8], form="all"
) # test node#8 has no out edges
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
assert set(tup) == set([(0,9,0),(0,6,1),(0,4,4),(4,3,2),(4,4,5),(4,9,7),(4,1,8)])
src, dst, eid = g.edges('all', 'eid')
assert set(tup) == set(
[
(0, 9, 0),
(0, 6, 1),
(0, 4, 4),
(4, 3, 2),
(4, 4, 5),
(4, 9, 7),
(4, 1, 8),
]
)
src, dst, eid = g.edges("all", "eid")
t_src, t_dst = edge_pair_input()
t_tup = list(zip(t_src, t_dst, list(range(20))))
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
assert set(tup) == set(t_tup)
assert list(F.asnumpy(eid)) == list(range(20))
src, dst, eid = g.edges('all', 'srcdst')
src, dst, eid = g.edges("all", "srcdst")
t_src, t_dst = edge_pair_input()
t_tup = list(zip(t_src, t_dst, list(range(20))))
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
......@@ -116,9 +144,13 @@ def test_query():
assert F.allclose(g.out_degrees([8, 9]), F.tensor([0, 1]))
assert np.array_equal(
F.sparse_to_numpy(g.adjacency_matrix(transpose=True)), scipy_coo_input().toarray().T)
F.sparse_to_numpy(g.adjacency_matrix(transpose=True)),
scipy_coo_input().toarray().T,
)
assert np.array_equal(
F.sparse_to_numpy(g.adjacency_matrix(transpose=False)), scipy_coo_input().toarray())
F.sparse_to_numpy(g.adjacency_matrix(transpose=False)),
scipy_coo_input().toarray(),
)
def _test(g):
# test twice to see whether the cached format works or not
......@@ -132,48 +164,73 @@ def test_query():
for i in range(10):
assert g.has_nodes(i)
assert not g.has_nodes(11)
assert F.allclose(g.has_nodes([0,2,10,11]), F.tensor([1,1,0,0]))
assert F.allclose(g.has_nodes([0, 2, 10, 11]), F.tensor([1, 1, 0, 0]))
src, dst = edge_pair_input(sort=True)
for u, v in zip(src, dst):
assert g.has_edges_between(u, v)
assert not g.has_edges_between(0, 0)
assert F.allclose(g.has_edges_between([0, 0, 3], [0, 9, 8]), F.tensor([0,1,1]))
assert set(F.asnumpy(g.predecessors(9))) == set([0,5,7,4])
assert set(F.asnumpy(g.successors(2))) == set([7,3])
assert F.allclose(
g.has_edges_between([0, 0, 3], [0, 9, 8]), F.tensor([0, 1, 1])
)
assert set(F.asnumpy(g.predecessors(9))) == set([0, 5, 7, 4])
assert set(F.asnumpy(g.successors(2))) == set([7, 3])
# src = [0 0 0 1 1 2 2 3 3 4 4 4 4 5 5 6 7 7 7 9]
# dst = [4 6 9 3 5 3 7 5 8 1 3 4 9 1 9 6 2 8 9 2]
# eid = [0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9]
assert g.edge_ids(4,4) == 11
assert F.allclose(g.edge_ids([4,0], [4,9]), F.tensor([11,2]))
assert g.edge_ids(4, 4) == 11
assert F.allclose(g.edge_ids([4, 0], [4, 9]), F.tensor([11, 2]))
src, dst = g.find_edges([3, 6, 5])
assert F.allclose(src, F.tensor([1, 2, 2]))
assert F.allclose(dst, F.tensor([3, 7, 3]))
src, dst, eid = g.in_edges(9, form='all')
src, dst, eid = g.in_edges(9, form="all")
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
assert set(tup) == set([(0,9,2),(5,9,14),(7,9,18),(4,9,12)])
src, dst, eid = g.in_edges([9,0,8], form='all') # test node#0 has no in edges
assert set(tup) == set([(0, 9, 2), (5, 9, 14), (7, 9, 18), (4, 9, 12)])
src, dst, eid = g.in_edges(
[9, 0, 8], form="all"
) # test node#0 has no in edges
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
assert set(tup) == set([(0,9,2),(5,9,14),(7,9,18),(4,9,12),(3,8,8),(7,8,17)])
src, dst, eid = g.out_edges(0, form='all')
assert set(tup) == set(
[
(0, 9, 2),
(5, 9, 14),
(7, 9, 18),
(4, 9, 12),
(3, 8, 8),
(7, 8, 17),
]
)
src, dst, eid = g.out_edges(0, form="all")
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
assert set(tup) == set([(0,9,2),(0,6,1),(0,4,0)])
src, dst, eid = g.out_edges([0,4,8], form='all') # test node#8 has no out edges
assert set(tup) == set([(0, 9, 2), (0, 6, 1), (0, 4, 0)])
src, dst, eid = g.out_edges(
[0, 4, 8], form="all"
) # test node#8 has no out edges
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
assert set(tup) == set([(0,9,2),(0,6,1),(0,4,0),(4,3,10),(4,4,11),(4,9,12),(4,1,9)])
src, dst, eid = g.edges('all', 'eid')
assert set(tup) == set(
[
(0, 9, 2),
(0, 6, 1),
(0, 4, 0),
(4, 3, 10),
(4, 4, 11),
(4, 9, 12),
(4, 1, 9),
]
)
src, dst, eid = g.edges("all", "eid")
t_src, t_dst = edge_pair_input(sort=True)
t_tup = list(zip(t_src, t_dst, list(range(20))))
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
assert set(tup) == set(t_tup)
assert list(F.asnumpy(eid)) == list(range(20))
src, dst, eid = g.edges('all', 'srcdst')
src, dst, eid = g.edges("all", "srcdst")
t_src, t_dst = edge_pair_input(sort=True)
t_tup = list(zip(t_src, t_dst, list(range(20))))
tup = list(zip(F.asnumpy(src), F.asnumpy(dst), F.asnumpy(eid)))
......@@ -188,9 +245,13 @@ def test_query():
assert F.allclose(g.out_degrees([8, 9]), F.tensor([0, 1]))
assert np.array_equal(
F.sparse_to_numpy(g.adjacency_matrix(transpose=True)), scipy_coo_input().toarray().T)
F.sparse_to_numpy(g.adjacency_matrix(transpose=True)),
scipy_coo_input().toarray().T,
)
assert np.array_equal(
F.sparse_to_numpy(g.adjacency_matrix(transpose=False)), scipy_coo_input().toarray())
F.sparse_to_numpy(g.adjacency_matrix(transpose=False)),
scipy_coo_input().toarray(),
)
def _test_csr(g):
# test twice to see whether the cached format works or not
......@@ -199,18 +260,18 @@ def test_query():
def _test_edge_ids():
g = gen_by_mutation()
eids = g.edge_ids([4,0], [4,9])
eids = g.edge_ids([4, 0], [4, 9])
assert eids.shape[0] == 2
eid = g.edge_ids(4, 4)
assert isinstance(eid, numbers.Number)
with pytest.raises(DGLError):
eids = g.edge_ids([9,0], [4,9])
eids = g.edge_ids([9, 0], [4, 9])
with pytest.raises(DGLError):
eid = g.edge_ids(4, 5)
g.add_edges(0, 4)
eids = g.edge_ids([0,0], [4,9])
eids = g.edge_ids([0, 0], [4, 9])
eid = g.edge_ids(0, 4)
_test(gen_by_mutation())
......@@ -224,35 +285,38 @@ def test_query():
_test_csr(gen_from_data(scipy_csr_input(), True, False))
_test_edge_ids()
def test_mutation():
    """Check in-place node/edge addition, with and without feature data.

    Fix: an unresolved diff duplicated several statements here (e.g.
    ``g.add_nodes(5, {...})`` ran twice, producing 15 nodes and breaking
    the 10-row feature asserts). Each mutation now runs exactly once.
    """
    g = dgl.DGLGraph()
    g = g.to(F.ctx())
    # Add 5 nodes without data (features default to zeros), then 5 with data.
    g.add_nodes(5)
    g.add_nodes(5, {"h": F.ones((5, 2))})
    ans = F.cat([F.zeros((5, 2)), F.ones((5, 2))], 0)
    assert F.allclose(ans, g.ndata["h"])
    g.ndata["w"] = 2 * F.ones((10, 2))
    assert F.allclose(2 * F.ones((10, 2)), g.ndata["w"])
    # Add 2 edges without data, then 2 with data.
    g.add_edges([2, 3], [3, 4])
    g.add_edges([0, 1], [1, 2], {"m": F.ones((2, 2))})
    ans = F.cat([F.zeros((2, 2)), F.ones((2, 2))], 0)
    assert F.allclose(ans, g.edata["m"])
def test_scipy_adjmat():
    """Adjacency matrices exported in CSR and COO scipy formats must agree,
    both for the default orientation and for ``transpose=False``.
    """
    g = dgl.DGLGraph()
    g.add_nodes(10)
    g.add_edges(range(9), range(1, 10))

    adj_0 = g.adj(scipy_fmt="csr")
    adj_1 = g.adj(scipy_fmt="coo")
    assert np.array_equal(adj_0.toarray(), adj_1.toarray())

    adj_t0 = g.adj(transpose=False, scipy_fmt="csr")
    adj_t_1 = g.adj(transpose=False, scipy_fmt="coo")
    # BUG fix: the original re-compared adj_0/adj_1 here, so the
    # transpose=False pair computed above was never actually checked.
    assert np.array_equal(adj_t0.toarray(), adj_t_1.toarray())
def test_incmat():
g = dgl.DGLGraph()
g.add_nodes(4)
......@@ -261,38 +325,64 @@ def test_incmat():
g.add_edges(0, 3) # 2
g.add_edges(2, 3) # 3
g.add_edges(1, 1) # 4
inc_in = F.sparse_to_numpy(g.incidence_matrix('in'))
inc_out = F.sparse_to_numpy(g.incidence_matrix('out'))
inc_both = F.sparse_to_numpy(g.incidence_matrix('both'))
inc_in = F.sparse_to_numpy(g.incidence_matrix("in"))
inc_out = F.sparse_to_numpy(g.incidence_matrix("out"))
inc_both = F.sparse_to_numpy(g.incidence_matrix("both"))
print(inc_in)
print(inc_out)
print(inc_both)
assert np.allclose(
inc_in,
np.array([[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 1.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 1., 0.]]))
np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
]
),
)
assert np.allclose(
inc_out,
np.array([[1., 1., 1., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0.]]))
np.array(
[
[1.0, 1.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
),
)
assert np.allclose(
inc_both,
np.array([[-1., -1., -1., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 1., 0., -1., 0.],
[0., 0., 1., 1., 0.]]))
np.array(
[
[-1.0, -1.0, -1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -1.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
]
),
)
def test_find_edges():
g = dgl.DGLGraph()
g.add_nodes(10)
g.add_edges(range(9), range(1, 10))
e = g.find_edges([1, 3, 2, 4])
assert F.asnumpy(e[0][0]) == 1 and F.asnumpy(e[0][1]) == 3 and F.asnumpy(e[0][2]) == 2 and F.asnumpy(e[0][3]) == 4
assert F.asnumpy(e[1][0]) == 2 and F.asnumpy(e[1][1]) == 4 and F.asnumpy(e[1][2]) == 3 and F.asnumpy(e[1][3]) == 5
assert (
F.asnumpy(e[0][0]) == 1
and F.asnumpy(e[0][1]) == 3
and F.asnumpy(e[0][2]) == 2
and F.asnumpy(e[0][3]) == 4
)
assert (
F.asnumpy(e[1][0]) == 2
and F.asnumpy(e[1][1]) == 4
and F.asnumpy(e[1][2]) == 3
and F.asnumpy(e[1][3]) == 5
)
try:
g.find_edges([10])
......@@ -302,6 +392,7 @@ def test_find_edges():
finally:
assert fail
def test_ismultigraph():
g = dgl.DGLGraph()
g.add_nodes(10)
......@@ -313,6 +404,7 @@ def test_ismultigraph():
g.add_edges([0, 2], [0, 3])
assert g.is_multigraph == True
def test_hypersparse_query():
g = dgl.DGLGraph()
g = g.to(F.ctx())
......@@ -323,14 +415,15 @@ def test_hypersparse_query():
assert not g.has_nodes(1000002)
assert g.edge_ids(0, 1) == 0
src, dst = g.find_edges([0])
src, dst, eid = g.in_edges(1, form='all')
src, dst, eid = g.out_edges(0, form='all')
src, dst, eid = g.in_edges(1, form="all")
src, dst, eid = g.out_edges(0, form="all")
src, dst = g.edges()
assert g.in_degrees(0) == 0
assert g.in_degrees(1) == 1
assert g.out_degrees(0) == 1
assert g.out_degrees(1) == 0
def test_empty_data_initialized():
g = dgl.DGLGraph()
g = g.to(F.ctx())
......@@ -339,6 +432,7 @@ def test_empty_data_initialized():
assert "ha" in g.ndata
assert len(g.ndata["ha"]) == 1
def test_is_sorted():
u_src, u_dst = edge_pair_input(False)
s_src, s_dst = edge_pair_input(True)
......@@ -379,10 +473,10 @@ def test_formats():
try:
g.in_degrees()
g.out_degrees()
g.formats('coo').in_degrees()
g.formats('coo').out_degrees()
g.formats('csc').in_degrees()
g.formats('csr').out_degrees()
g.formats("coo").in_degrees()
g.formats("coo").out_degrees()
g.formats("csc").in_degrees()
g.formats("csr").out_degrees()
fail = False
except DGLError:
fail = True
......@@ -390,7 +484,7 @@ def test_formats():
assert not fail
# in_degrees NOT works if csc available only
try:
g.formats('csc').out_degrees()
g.formats("csc").out_degrees()
fail = True
except DGLError:
fail = False
......@@ -398,14 +492,15 @@ def test_formats():
assert not fail
# out_degrees NOT works if csr available only
try:
g.formats('csr').in_degrees()
g.formats("csr").in_degrees()
fail = True
except DGLError:
fail = False
finally:
assert not fail
if __name__ == '__main__':
if __name__ == "__main__":
test_query()
test_mutation()
test_scipy_adjmat()
......
import backend as F
import numpy as np
from test_utils import parametrize_idtype
import dgl
import numpy as np
from test_utils import parametrize_idtype
@parametrize_idtype
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment