Unverified commit 74c9d27d, authored by Hongzhi (Steve), Chen and committed by GitHub

[Misc] Auto-format tests. (#5313)



* [Misc] Auto-format tests.

* more

---------
Co-authored-by: Ubuntu <ubuntu@ip-172-31-28-63.ap-northeast-1.compute.internal>
parent 86193c26
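The commit message does not name the tooling, but the changes below (single quotes normalized to double quotes, long calls wrapped one argument per line, imports regrouped and sorted) match what `black` and `isort` produce. A minimal sketch of that pipeline; the tool choice and default options here are assumptions inferred from the diff, not stated anywhere in the commit:

```python
# Hypothetical reconstruction of the auto-format step; black/isort are
# inferred from the style of the changes, not named by the commit.
import black
import isort

src = "g.ndata['h'] = F.tensor([0, 1, 2, 3, 4])\n"
src = isort.code(src)  # sorts/groups imports (a no-op for this snippet)
src = black.format_str(src, mode=black.Mode())  # quotes, wrapping, spacing
print(src, end="")  # g.ndata["h"] = F.tensor([0, 1, 2, 3, 4])
```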
import unittest

import backend as F
-import numpy as np
import dgl
import dgl.ndarray as nd
+import numpy as np


@unittest.skipIf(
......
@@ -735,11 +735,7 @@ def _test_DefaultDataParser():
    # string consists of non-numeric values
    with tempfile.TemporaryDirectory() as test_dir:
        csv_path = os.path.join(test_dir, "nodes.csv")
-        df = pd.DataFrame(
-            {
-                "label": ["a", "b", "c"],
-            }
-        )
+        df = pd.DataFrame({"label": ["a", "b", "c"]})
        df.to_csv(csv_path, index=False)
        dp = DefaultDataParser()
        df = pd.read_csv(csv_path)
@@ -752,11 +748,7 @@ def _test_DefaultDataParser():
    # csv has index column which is ignored as it's unnamed
    with tempfile.TemporaryDirectory() as test_dir:
        csv_path = os.path.join(test_dir, "nodes.csv")
-        df = pd.DataFrame(
-            {
-                "label": [1, 2, 3],
-            }
-        )
+        df = pd.DataFrame({"label": [1, 2, 3]})
        df.to_csv(csv_path)
        dp = DefaultDataParser()
        df = pd.read_csv(csv_path)
@@ -1042,9 +1034,7 @@ def _test_load_edge_data_from_csv():
    # required headers are missing
    df = pd.DataFrame(
-        {
-            "src_id": np.random.randint(num_nodes, size=num_edges),
-        }
+        {"src_id": np.random.randint(num_nodes, size=num_edges)}
    )
    csv_path = os.path.join(test_dir, "edges.csv")
    df.to_csv(csv_path, index=False)
@@ -1056,9 +1046,7 @@ def _test_load_edge_data_from_csv():
        expect_except = True
    assert expect_except
    df = pd.DataFrame(
-        {
-            "dst_id": np.random.randint(num_nodes, size=num_edges),
-        }
+        {"dst_id": np.random.randint(num_nodes, size=num_edges)}
    )
    csv_path = os.path.join(test_dir, "edges.csv")
    df.to_csv(csv_path, index=False)
......
@@ -4,12 +4,12 @@ import time
import unittest

import backend as F
-import numpy as np
-import pytest
-import scipy as sp
import dgl
import dgl.ndarray as nd
+import numpy as np
+import pytest
+import scipy as sp
from dgl import DGLGraph
from dgl.data.utils import load_labels, load_tensors, save_tensors
......
import unittest

import backend as F
-from test_utils import parametrize_idtype
import dgl
from dgl.dataloading import (
+    NeighborSampler,
    as_edge_prediction_sampler,
    negative_sampler,
-    NeighborSampler,
)
+from test_utils import parametrize_idtype


def create_test_graph(idtype):
......
This diff is collapsed.
@@ -4,18 +4,18 @@ import unittest
from collections import Counter

import backend as F
+import dgl
+import dgl.function as fn
import networkx as nx
import numpy as np
import pytest
import scipy.sparse as ssp
import test_utils
+from scipy.sparse import rand
+from test_utils import get_cases, parametrize_idtype
-import dgl
-import dgl.function as fn
from dgl import DGLError
from dgl.ops import edge_softmax
-from scipy.sparse import rand
-from test_utils import get_cases, parametrize_idtype

edge_softmax_shapes = [(1,), (1, 3), (3, 4, 5)]
rfuncs = {"sum": fn.sum, "max": fn.max, "min": fn.min, "mean": fn.mean}
......
+import unittest
+
+import backend as F
import dgl
import numpy as np
-import backend as F
-import unittest
from test_utils import parametrize_idtype
def tree1(idtype):
    """Generate a tree
         0
@@ -19,10 +22,11 @@ def tree1(idtype):
    g.add_edges(4, 1)
    g.add_edges(1, 0)
    g.add_edges(2, 0)
-    g.ndata['h'] = F.tensor([0, 1, 2, 3, 4])
-    g.edata['h'] = F.randn((4, 10))
+    g.ndata["h"] = F.tensor([0, 1, 2, 3, 4])
+    g.edata["h"] = F.randn((4, 10))
    return g


def tree2(idtype):
    """Generate a tree
         1
@@ -38,10 +42,11 @@ def tree2(idtype):
    g.add_edges(0, 4)
    g.add_edges(4, 1)
    g.add_edges(3, 1)
-    g.ndata['h'] = F.tensor([0, 1, 2, 3, 4])
-    g.edata['h'] = F.randn((4, 10))
+    g.ndata["h"] = F.tensor([0, 1, 2, 3, 4])
+    g.edata["h"] = F.randn((4, 10))
    return g
@parametrize_idtype
def test_batch_unbatch(idtype):
    t1 = tree1(idtype)
@@ -55,10 +60,11 @@ def test_batch_unbatch(idtype):
    assert F.allclose(bg.batch_num_edges(), F.tensor([4, 4]))

    tt1, tt2 = dgl.unbatch(bg)
-    assert F.allclose(t1.ndata['h'], tt1.ndata['h'])
-    assert F.allclose(t1.edata['h'], tt1.edata['h'])
-    assert F.allclose(t2.ndata['h'], tt2.ndata['h'])
-    assert F.allclose(t2.edata['h'], tt2.edata['h'])
+    assert F.allclose(t1.ndata["h"], tt1.ndata["h"])
+    assert F.allclose(t1.edata["h"], tt1.edata["h"])
+    assert F.allclose(t2.ndata["h"], tt2.ndata["h"])
+    assert F.allclose(t2.edata["h"], tt2.edata["h"])
@parametrize_idtype
def test_batch_unbatch1(idtype):
@@ -73,14 +79,18 @@ def test_batch_unbatch1(idtype):
    assert F.allclose(b2.batch_num_edges(), F.tensor([4, 4, 4]))

    s1, s2, s3 = dgl.unbatch(b2)
-    assert F.allclose(t2.ndata['h'], s1.ndata['h'])
-    assert F.allclose(t2.edata['h'], s1.edata['h'])
-    assert F.allclose(t1.ndata['h'], s2.ndata['h'])
-    assert F.allclose(t1.edata['h'], s2.edata['h'])
-    assert F.allclose(t2.ndata['h'], s3.ndata['h'])
-    assert F.allclose(t2.edata['h'], s3.edata['h'])
+    assert F.allclose(t2.ndata["h"], s1.ndata["h"])
+    assert F.allclose(t2.edata["h"], s1.edata["h"])
+    assert F.allclose(t1.ndata["h"], s2.ndata["h"])
+    assert F.allclose(t1.edata["h"], s2.edata["h"])
+    assert F.allclose(t2.ndata["h"], s3.ndata["h"])
+    assert F.allclose(t2.edata["h"], s3.edata["h"])
-@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support inplace update")
+@unittest.skipIf(
+    dgl.backend.backend_name == "tensorflow",
+    reason="TF doesn't support inplace update",
+)
@parametrize_idtype
def test_batch_unbatch_frame(idtype):
    """Test module of node/edge frames of batched/unbatched DGLGraphs.
@@ -93,30 +103,31 @@ def test_batch_unbatch_frame(idtype):
    N2 = t2.number_of_nodes()
    E2 = t2.number_of_edges()
    D = 10
-    t1.ndata['h'] = F.randn((N1, D))
-    t1.edata['h'] = F.randn((E1, D))
-    t2.ndata['h'] = F.randn((N2, D))
-    t2.edata['h'] = F.randn((E2, D))
+    t1.ndata["h"] = F.randn((N1, D))
+    t1.edata["h"] = F.randn((E1, D))
+    t2.ndata["h"] = F.randn((N2, D))
+    t2.edata["h"] = F.randn((E2, D))

    b1 = dgl.batch([t1, t2])
    b2 = dgl.batch([t2])
-    b1.ndata['h'][:N1] = F.zeros((N1, D))
-    b1.edata['h'][:E1] = F.zeros((E1, D))
-    b2.ndata['h'][:N2] = F.zeros((N2, D))
-    b2.edata['h'][:E2] = F.zeros((E2, D))
-    assert not F.allclose(t1.ndata['h'], F.zeros((N1, D)))
-    assert not F.allclose(t1.edata['h'], F.zeros((E1, D)))
-    assert not F.allclose(t2.ndata['h'], F.zeros((N2, D)))
-    assert not F.allclose(t2.edata['h'], F.zeros((E2, D)))
+    b1.ndata["h"][:N1] = F.zeros((N1, D))
+    b1.edata["h"][:E1] = F.zeros((E1, D))
+    b2.ndata["h"][:N2] = F.zeros((N2, D))
+    b2.edata["h"][:E2] = F.zeros((E2, D))
+    assert not F.allclose(t1.ndata["h"], F.zeros((N1, D)))
+    assert not F.allclose(t1.edata["h"], F.zeros((E1, D)))
+    assert not F.allclose(t2.ndata["h"], F.zeros((N2, D)))
+    assert not F.allclose(t2.edata["h"], F.zeros((E2, D)))

    g1, g2 = dgl.unbatch(b1)
-    _g2, = dgl.unbatch(b2)
-    assert F.allclose(g1.ndata['h'], F.zeros((N1, D)))
-    assert F.allclose(g1.edata['h'], F.zeros((E1, D)))
-    assert F.allclose(g2.ndata['h'], t2.ndata['h'])
-    assert F.allclose(g2.edata['h'], t2.edata['h'])
-    assert F.allclose(_g2.ndata['h'], F.zeros((N2, D)))
-    assert F.allclose(_g2.edata['h'], F.zeros((E2, D)))
+    (_g2,) = dgl.unbatch(b2)
+    assert F.allclose(g1.ndata["h"], F.zeros((N1, D)))
+    assert F.allclose(g1.edata["h"], F.zeros((E1, D)))
+    assert F.allclose(g2.ndata["h"], t2.ndata["h"])
+    assert F.allclose(g2.edata["h"], t2.edata["h"])
+    assert F.allclose(_g2.ndata["h"], F.zeros((N2, D)))
+    assert F.allclose(_g2.edata["h"], F.zeros((E2, D)))
@parametrize_idtype
def test_batch_unbatch2(idtype):
@@ -128,10 +139,11 @@ def test_batch_unbatch2(idtype):
    b.add_nodes(3)
    b.add_edges(0, [1, 2])
    c = dgl.batch([a, b])
-    c.ndata['h'] = F.ones((7, 1))
-    c.edata['w'] = F.ones((5, 1))
-    assert F.allclose(c.ndata['h'], F.ones((7, 1)))
-    assert F.allclose(c.edata['w'], F.ones((5, 1)))
+    c.ndata["h"] = F.ones((7, 1))
+    c.edata["w"] = F.ones((5, 1))
+    assert F.allclose(c.ndata["h"], F.ones((7, 1)))
+    assert F.allclose(c.edata["w"], F.ones((5, 1)))
@parametrize_idtype
def test_batch_send_and_recv(idtype):
@@ -139,16 +151,17 @@ def test_batch_send_and_recv(idtype):
    t2 = tree2(idtype)

    bg = dgl.batch([t1, t2])
-    _mfunc = lambda edges: {'m' : edges.src['h']}
-    _rfunc = lambda nodes: {'h' : F.sum(nodes.mailbox['m'], 1)}
+    _mfunc = lambda edges: {"m": edges.src["h"]}
+    _rfunc = lambda nodes: {"h": F.sum(nodes.mailbox["m"], 1)}
    u = [3, 4, 2 + 5, 0 + 5]
    v = [1, 1, 4 + 5, 4 + 5]

    bg.send_and_recv((u, v), _mfunc, _rfunc)

    t1, t2 = dgl.unbatch(bg)
-    assert F.asnumpy(t1.ndata['h'][1]) == 7
-    assert F.asnumpy(t2.ndata['h'][4]) == 2
+    assert F.asnumpy(t1.ndata["h"][1]) == 7
+    assert F.asnumpy(t2.ndata["h"][4]) == 2
@parametrize_idtype
def test_batch_propagate(idtype):
@@ -156,8 +169,8 @@ def test_batch_propagate(idtype):
    t2 = tree2(idtype)

    bg = dgl.batch([t1, t2])
-    _mfunc = lambda edges: {'m' : edges.src['h']}
-    _rfunc = lambda nodes: {'h' : F.sum(nodes.mailbox['m'], 1)}
+    _mfunc = lambda edges: {"m": edges.src["h"]}
+    _rfunc = lambda nodes: {"h": F.sum(nodes.mailbox["m"], 1)}
    # get leaves.

    order = []
@@ -175,8 +188,9 @@ def test_batch_propagate(idtype):
    bg.prop_edges(order, _mfunc, _rfunc)

    t1, t2 = dgl.unbatch(bg)
-    assert F.asnumpy(t1.ndata['h'][0]) == 9
-    assert F.asnumpy(t2.ndata['h'][1]) == 5
+    assert F.asnumpy(t1.ndata["h"][0]) == 9
+    assert F.asnumpy(t2.ndata["h"][1]) == 5
@parametrize_idtype
def test_batched_edge_ordering(idtype):
@@ -184,17 +198,18 @@ def test_batched_edge_ordering(idtype):
    g1.add_nodes(6)
    g1.add_edges([4, 4, 2, 2, 0], [5, 3, 3, 1, 1])
    e1 = F.randn((5, 10))
-    g1.edata['h'] = e1
+    g1.edata["h"] = e1
    g2 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
    g2.add_nodes(6)
-    g2.add_edges([0, 1 ,2 ,5, 4 ,5], [1, 2, 3, 4, 3, 0])
+    g2.add_edges([0, 1, 2, 5, 4, 5], [1, 2, 3, 4, 3, 0])
    e2 = F.randn((6, 10))
-    g2.edata['h'] = e2
+    g2.edata["h"] = e2
    g = dgl.batch([g1, g2])
-    r1 = g.edata['h'][g.edge_ids(4, 5)]
-    r2 = g1.edata['h'][g1.edge_ids(4, 5)]
+    r1 = g.edata["h"][g.edge_ids(4, 5)]
+    r2 = g1.edata["h"][g1.edge_ids(4, 5)]
    assert F.array_equal(r1, r2)
@parametrize_idtype
def test_batch_no_edge(idtype):
    g1 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
@@ -202,22 +217,24 @@ def test_batch_no_edge(idtype):
    g1.add_edges([4, 4, 2, 2, 0], [5, 3, 3, 1, 1])
    g2 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
    g2.add_nodes(6)
-    g2.add_edges([0, 1, 2, 5, 4, 5], [1 ,2 ,3, 4, 3, 0])
+    g2.add_edges([0, 1, 2, 5, 4, 5], [1, 2, 3, 4, 3, 0])
    g3 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
    g3.add_nodes(1)  # no edges
    g = dgl.batch([g1, g3, g2])  # should not throw an error
@parametrize_idtype
def test_batch_keeps_empty_data(idtype):
    g1 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
    g1.ndata["nh"] = F.tensor([])
    g1.edata["eh"] = F.tensor([])
    g2 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
    g2.ndata["nh"] = F.tensor([])
    g2.edata["eh"] = F.tensor([])
    g = dgl.batch([g1, g2])
    assert "nh" in g.ndata
    assert "eh" in g.edata
def _get_subgraph_batch_info(keys, induced_indices_arr, batch_num_objs):
    """Internal function to compute batch information for subgraphs.
@@ -235,12 +252,16 @@ def _get_subgraph_batch_info(keys, induced_indices_arr, batch_num_objs):
        A dictionary mapping all node/edge type keys to the ``batch_num_objs``
        array of corresponding graph.
    """
-    bucket_offset = np.expand_dims(np.cumsum(F.asnumpy(batch_num_objs), 0), -1)  # (num_bkts, 1)
+    bucket_offset = np.expand_dims(
+        np.cumsum(F.asnumpy(batch_num_objs), 0), -1
+    )  # (num_bkts, 1)
    ret = {}
    for key, induced_indices in zip(keys, induced_indices_arr):
        # NOTE(Zihao): this implementation is not efficient and we can replace it with
        # binary search in the future.
-        induced_indices = np.expand_dims(F.asnumpy(induced_indices), 0)  # (1, num_nodes)
+        induced_indices = np.expand_dims(
+            F.asnumpy(induced_indices), 0
+        )  # (1, num_nodes)
        new_offset = np.sum((induced_indices < bucket_offset), 1)  # (num_bkts,)
        # start_offset = [0] + [new_offset[i-1] for i in range(1, n_bkts)]
        start_offset = np.concatenate([np.zeros((1,)), new_offset[:-1]], 0)
@@ -248,6 +269,7 @@ def _get_subgraph_batch_info(keys, induced_indices_arr, batch_num_objs):
        ret[key] = F.tensor(new_batch_num_objs, dtype=F.dtype(batch_num_objs))
    return ret
@parametrize_idtype
def test_set_batch_info(idtype):
    ctx = F.ctx()
@@ -257,13 +279,17 @@ def test_set_batch_info(idtype):
    bg = dgl.batch([g1, g2])
    batch_num_nodes = F.astype(bg.batch_num_nodes(), idtype)
    batch_num_edges = F.astype(bg.batch_num_edges(), idtype)

    # test homogeneous node subgraph
    sg_n = dgl.node_subgraph(bg, list(range(10, 20)) + list(range(50, 60)))
-    induced_nodes = sg_n.ndata['_ID']
-    induced_edges = sg_n.edata['_ID']
-    new_batch_num_nodes = _get_subgraph_batch_info(bg.ntypes, [induced_nodes], batch_num_nodes)
-    new_batch_num_edges = _get_subgraph_batch_info(bg.canonical_etypes, [induced_edges], batch_num_edges)
+    induced_nodes = sg_n.ndata["_ID"]
+    induced_edges = sg_n.edata["_ID"]
+    new_batch_num_nodes = _get_subgraph_batch_info(
+        bg.ntypes, [induced_nodes], batch_num_nodes
+    )
+    new_batch_num_edges = _get_subgraph_batch_info(
+        bg.canonical_etypes, [induced_edges], batch_num_edges
+    )
    sg_n.set_batch_num_nodes(new_batch_num_nodes)
    sg_n.set_batch_num_edges(new_batch_num_edges)
    subg_n1, subg_n2 = dgl.unbatch(sg_n)
@@ -273,11 +299,17 @@ def test_set_batch_info(idtype):
    assert subg_n2.num_edges() == subg2.num_edges()

    # test homogeneous edge subgraph
-    sg_e = dgl.edge_subgraph(bg, list(range(40, 70)) + list(range(150, 200)), relabel_nodes=False)
+    sg_e = dgl.edge_subgraph(
+        bg, list(range(40, 70)) + list(range(150, 200)), relabel_nodes=False
+    )
    induced_nodes = F.arange(0, bg.num_nodes(), idtype)
-    induced_edges = sg_e.edata['_ID']
-    new_batch_num_nodes = _get_subgraph_batch_info(bg.ntypes, [induced_nodes], batch_num_nodes)
-    new_batch_num_edges = _get_subgraph_batch_info(bg.canonical_etypes, [induced_edges], batch_num_edges)
+    induced_edges = sg_e.edata["_ID"]
+    new_batch_num_nodes = _get_subgraph_batch_info(
+        bg.ntypes, [induced_nodes], batch_num_nodes
+    )
+    new_batch_num_edges = _get_subgraph_batch_info(
+        bg.canonical_etypes, [induced_edges], batch_num_edges
+    )
    sg_e.set_batch_num_nodes(new_batch_num_nodes)
    sg_e.set_batch_num_edges(new_batch_num_edges)
    subg_e1, subg_e2 = dgl.unbatch(sg_e)
@@ -287,15 +319,14 @@ def test_set_batch_info(idtype):
    assert subg_e2.num_nodes() == subg2.num_nodes()


-if __name__ == '__main__':
-    #test_batch_unbatch()
-    #test_batch_unbatch1()
-    #test_batch_unbatch_frame()
-    #test_batch_unbatch2()
-    #test_batched_edge_ordering()
-    #test_batch_send_then_recv()
-    #test_batch_send_and_recv()
-    #test_batch_propagate()
-    #test_batch_no_edge()
+if __name__ == "__main__":
+    # test_batch_unbatch()
+    # test_batch_unbatch1()
+    # test_batch_unbatch_frame()
+    # test_batch_unbatch2()
+    # test_batched_edge_ordering()
+    # test_batch_send_then_recv()
+    # test_batch_send_and_recv()
+    # test_batch_propagate()
+    # test_batch_no_edge()
    test_set_batch_info(F.int32)
@@ -2,10 +2,10 @@ import os
import unittest

import backend as F
-import numpy as np
-import pytest
import dgl
+import numpy as np
+import pytest


@unittest.skipIf(os.name == "nt", reason="Cython only works on linux")
......
@@ -2,12 +2,12 @@ import pickle
import unittest

import backend as F
-import numpy as np
-from test_utils import parametrize_idtype
import dgl
import dgl.ndarray as nd
+import numpy as np
from dgl.frame import Column
+from test_utils import parametrize_idtype


def test_column_subcolumn():
......
import unittest

import backend as F
-import numpy as np
import dgl
+import numpy as np


@unittest.skipIf(
......
@@ -4,18 +4,18 @@ from collections import Counter
from itertools import product

import backend as F
+import dgl
+import dgl.function as fn
import networkx as nx
import numpy as np
import pytest
import scipy.sparse as ssp
import test_utils
+from dgl import DGLError
from scipy.sparse import rand
from test_utils import get_cases, parametrize_idtype
-import dgl
-import dgl.function as fn
-from dgl import DGLError

rfuncs = {"sum": fn.sum, "max": fn.max, "min": fn.min, "mean": fn.mean}
fill_value = {"sum": 0, "max": float("-inf")}
feat_size = 2
@@ -51,7 +51,6 @@ def create_test_heterograph(idtype):
@parametrize_idtype
def test_unary_copy_u(idtype):
    def _test(mfunc):
        g = create_test_heterograph(idtype)
        x1 = F.randn((g.num_nodes("user"), feat_size))
@@ -108,7 +107,6 @@ def test_unary_copy_u(idtype):
@parametrize_idtype
def test_unary_copy_e(idtype):
    def _test(mfunc):
        g = create_test_heterograph(idtype)
        feat_size = 2
@@ -168,7 +166,6 @@ def test_unary_copy_e(idtype):
@parametrize_idtype
def test_binary_op(idtype):
    def _test(lhs, rhs, binary_op):
        g = create_test_heterograph(idtype)
        n1 = F.randn((g.num_nodes("user"), feat_size))
@@ -237,6 +234,7 @@ def test_binary_op(idtype):
        loss = F.sum(r2.view(-1), 0)
        F.backward(loss)
        n_grad2 = F.grad(g.nodes["game"].data["h"])

        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
......
+from itertools import product
+
+import backend as F
import dgl
import dgl.function as fn
import networkx as nx
import numpy as np
-import backend as F
-from itertools import product
-from test_utils import parametrize_idtype, get_cases
import pytest
+from test_utils import get_cases, parametrize_idtype
def udf_copy_src(edges):
-    return {'m': edges.src['u']}
+    return {"m": edges.src["u"]}


def udf_copy_edge(edges):
-    return {'m': edges.data['e']}
+    return {"m": edges.data["e"]}


def udf_mean(nodes):
-    return {'r2': F.mean(nodes.mailbox['m'], 1)}
+    return {"r2": F.mean(nodes.mailbox["m"], 1)}


def udf_sum(nodes):
-    return {'r2': F.sum(nodes.mailbox['m'], 1)}
+    return {"r2": F.sum(nodes.mailbox["m"], 1)}


def udf_max(nodes):
-    return {'r2': F.max(nodes.mailbox['m'], 1)}
+    return {"r2": F.max(nodes.mailbox["m"], 1)}


D1 = 5
D2 = 3
D3 = 4
D4 = 10  # NOTE(xiang): used to dot feature vector

-builtin = {'sum': fn.sum, 'max': fn.max, 'mean': fn.mean}
-udf_reduce = {'sum': udf_sum, 'max': udf_max, 'mean': udf_mean}
-fill_value = {'sum': 0, 'max': float("-inf")}
+builtin = {"sum": fn.sum, "max": fn.max, "mean": fn.mean}
+udf_reduce = {"sum": udf_sum, "max": udf_max, "mean": udf_mean}
+fill_value = {"sum": 0, "max": float("-inf")}
-def generate_feature(g, broadcast='none', binary_op='none'):
+def generate_feature(g, broadcast="none", binary_op="none"):
    """Create graph with src, edge, dst feature. broadcast can be 'u',
    'e', 'v', 'none'
    """
    np.random.seed(31)
    nv = g.number_of_nodes()
    ne = g.number_of_edges()
-    if binary_op == 'dot':
-        if broadcast == 'e':
+    if binary_op == "dot":
+        if broadcast == "e":
            u = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3, D4)))
            e = F.tensor(np.random.uniform(-1, 1, (ne, D2, 1, D4)))
            v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3, D4)))
-        elif broadcast == 'u':
+        elif broadcast == "u":
            u = F.tensor(np.random.uniform(-1, 1, (nv, D2, 1, D4)))
            e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3, D4)))
            v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3, D4)))
-        elif broadcast == 'v':
+        elif broadcast == "v":
            u = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3, D4)))
            e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3, D4)))
            v = F.tensor(np.random.uniform(-1, 1, (nv, D2, 1, D4)))
@@ -57,15 +64,15 @@ def generate_feature(g, broadcast='none', binary_op='none'):
            e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3, D4)))
            v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3, D4)))
    else:
-        if broadcast == 'e':
+        if broadcast == "e":
            u = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
            e = F.tensor(np.random.uniform(-1, 1, (ne, D2, 1)))
            v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
-        elif broadcast == 'u':
+        elif broadcast == "u":
            u = F.tensor(np.random.uniform(-1, 1, (nv, D2, 1)))
            e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3)))
            v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
-        elif broadcast == 'v':
+        elif broadcast == "v":
            u = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
            e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3)))
            v = F.tensor(np.random.uniform(-1, 1, (nv, D2, 1)))
@@ -73,7 +80,11 @@ def generate_feature(g, broadcast='none', binary_op='none'):
        u = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
        e = F.tensor(np.random.uniform(-1, 1, (ne, D1, D2, D3)))
        v = F.tensor(np.random.uniform(-1, 1, (nv, D1, D2, D3)))
-    return F.astype(u, F.float32), F.astype(v, F.float32), F.astype(e, F.float32)
+    return (
+        F.astype(u, F.float32),
+        F.astype(v, F.float32),
+        F.astype(e, F.float32),
+    )
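The shape tables in `generate_feature` lean on ordinary broadcasting: a per-edge `(D2, 1)` feature expands against a `(D1, D2, D3)` node feature along the trailing axes. A standalone NumPy illustration (not taken from the diff):

```python
# Illustration only: the (D2, 1) vs. (D1, D2, D3) broadcast implied by
# generate_feature's broadcast="e" shapes; not part of the commit.
import numpy as np

u_feat = np.random.uniform(-1, 1, (5, 3, 4))  # (D1, D2, D3), one node
e_feat = np.random.uniform(-1, 1, (3, 1))     # (D2, 1), one edge
print((u_feat * e_feat).shape)                # -> (5, 3, 4)
```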
def test_copy_src_reduce():
@@ -83,60 +94,65 @@ def test_copy_src_reduce():
        # https://github.com/dmlc/dgl/issues/761
        g.add_edges(g.nodes(), g.nodes())
        g = g.to(F.ctx())
-        hu, hv, he = generate_feature(g, 'none', 'none')
+        hu, hv, he = generate_feature(g, "none", "none")
        if partial:
            nid = F.tensor(list(range(0, 100, 2)), g.idtype)

-        g.ndata['u'] = F.attach_grad(F.clone(hu))
-        g.ndata['v'] = F.attach_grad(F.clone(hv))
-        g.edata['e'] = F.attach_grad(F.clone(he))
+        g.ndata["u"] = F.attach_grad(F.clone(hu))
+        g.ndata["v"] = F.attach_grad(F.clone(hv))
+        g.edata["e"] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
-                g.pull(nid, fn.copy_u(u='u', out='m'),
-                       builtin[red](msg='m', out='r1'))
+                g.pull(
+                    nid,
+                    fn.copy_u(u="u", out="m"),
+                    builtin[red](msg="m", out="r1"),
+                )
            else:
-                g.update_all(fn.copy_u(u='u', out='m'),
-                             builtin[red](msg='m', out='r1'))
-            r1 = g.ndata['r1']
+                g.update_all(
+                    fn.copy_u(u="u", out="m"), builtin[red](msg="m", out="r1")
+                )
+            r1 = g.ndata["r1"]
            F.backward(F.reduce_sum(r1))
-            n_grad1 = F.grad(g.ndata['u'])
+            n_grad1 = F.grad(g.ndata["u"])

        # reset grad
-        g.ndata['u'] = F.attach_grad(F.clone(hu))
-        g.ndata['v'] = F.attach_grad(F.clone(hv))
-        g.edata['e'] = F.attach_grad(F.clone(he))
+        g.ndata["u"] = F.attach_grad(F.clone(hu))
+        g.ndata["v"] = F.attach_grad(F.clone(hv))
+        g.edata["e"] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
                g.pull(nid, udf_copy_src, udf_reduce[red])
            else:
                g.update_all(udf_copy_src, udf_reduce[red])
-            r2 = g.ndata['r2']
+            r2 = g.ndata["r2"]
            F.backward(F.reduce_sum(r2))
-            n_grad2 = F.grad(g.ndata['u'])
+            n_grad2 = F.grad(g.ndata["u"])

        def _print_error(a, b):
-            print("ERROR: Test copy_src_{} partial: {}".
-                  format(red, partial))
-            for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())):
+            print("ERROR: Test copy_src_{} partial: {}".format(red, partial))
+            for i, (x, y) in enumerate(
+                zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())
+            ):
                if not np.allclose(x, y):
-                    print('@{} {} v.s. {}'.format(i, x, y))
+                    print("@{} {} v.s. {}".format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
        assert F.allclose(r1, r2)

        if not F.allclose(n_grad1, n_grad2):
-            print('node grad')
+            print("node grad")
            _print_error(n_grad1, n_grad2)
-        assert(F.allclose(n_grad1, n_grad2))
+        assert F.allclose(n_grad1, n_grad2)

-    _test('sum', False)
-    _test('max', False)
-    _test('mean', False)
-    _test('sum', True)
-    _test('max', True)
-    _test('mean', True)
+    _test("sum", False)
+    _test("max", False)
+    _test("mean", False)
+    _test("sum", True)
+    _test("max", True)
+    _test("mean", True)
def test_copy_edge_reduce():
@@ -145,80 +161,85 @@ def test_copy_edge_reduce():
        # NOTE(zihao): add self-loop to avoid zero-degree nodes.
        g.add_edges(g.nodes(), g.nodes())
        g = g.to(F.ctx())
-        hu, hv, he = generate_feature(g, 'none', 'none')
+        hu, hv, he = generate_feature(g, "none", "none")
        if partial:
            nid = F.tensor(list(range(0, 100, 2)), g.idtype)

-        g.ndata['u'] = F.attach_grad(F.clone(hu))
-        g.ndata['v'] = F.attach_grad(F.clone(hv))
-        g.edata['e'] = F.attach_grad(F.clone(he))
+        g.ndata["u"] = F.attach_grad(F.clone(hu))
+        g.ndata["v"] = F.attach_grad(F.clone(hv))
+        g.edata["e"] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
-                g.pull(nid, fn.copy_e(e='e', out='m'),
-                       builtin[red](msg='m', out='r1'))
+                g.pull(
+                    nid,
+                    fn.copy_e(e="e", out="m"),
+                    builtin[red](msg="m", out="r1"),
+                )
            else:
-                g.update_all(fn.copy_e(e='e', out='m'),
-                             builtin[red](msg='m', out='r1'))
-            r1 = g.ndata['r1']
+                g.update_all(
+                    fn.copy_e(e="e", out="m"), builtin[red](msg="m", out="r1")
+                )
+            r1 = g.ndata["r1"]
            F.backward(F.reduce_sum(r1))
-            e_grad1 = F.grad(g.edata['e'])
+            e_grad1 = F.grad(g.edata["e"])

        # reset grad
-        g.ndata['u'] = F.attach_grad(F.clone(hu))
-        g.ndata['v'] = F.attach_grad(F.clone(hv))
-        g.edata['e'] = F.attach_grad(F.clone(he))
+        g.ndata["u"] = F.attach_grad(F.clone(hu))
+        g.ndata["v"] = F.attach_grad(F.clone(hv))
+        g.edata["e"] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
                g.pull(nid, udf_copy_edge, udf_reduce[red])
            else:
                g.update_all(udf_copy_edge, udf_reduce[red])
-            r2 = g.ndata['r2']
+            r2 = g.ndata["r2"]
            F.backward(F.reduce_sum(r2))
-            e_grad2 = F.grad(g.edata['e'])
+            e_grad2 = F.grad(g.edata["e"])

        def _print_error(a, b):
-            print("ERROR: Test copy_edge_{} partial: {}".
-                  format(red, partial))
+            print("ERROR: Test copy_edge_{} partial: {}".format(red, partial))
            return
-            for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())):
+            for i, (x, y) in enumerate(
+                zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())
+            ):
                if not np.allclose(x, y):
-                    print('@{} {} v.s. {}'.format(i, x, y))
+                    print("@{} {} v.s. {}".format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
        assert F.allclose(r1, r2)

        if not F.allclose(e_grad1, e_grad2):
-            print('edge gradient')
+            print("edge gradient")
            _print_error(e_grad1, e_grad2)
-        assert(F.allclose(e_grad1, e_grad2))
+        assert F.allclose(e_grad1, e_grad2)

-    _test('sum', False)
-    _test('max', False)
-    _test('mean', False)
-    _test('sum', True)
-    _test('max', True)
-    _test('mean', True)
+    _test("sum", False)
+    _test("max", False)
+    _test("mean", False)
+    _test("sum", True)
+    _test("max", True)
+    _test("mean", True)
def test_all_binary_builtins():
-    def _test(g, lhs, rhs, binary_op, reducer, partial, nid, broadcast='none'):
+    def _test(g, lhs, rhs, binary_op, reducer, partial, nid, broadcast="none"):
        # initialize node/edge features with uniform(-1, 1)
        hu, hv, he = generate_feature(g, broadcast, binary_op)
-        if binary_op == 'div':
+        if binary_op == "div":
            # op = div
            # lhs range: [-1, 1]
            # rhs range: [1, 2]
            # result range: [-1, 1]
-            if rhs == 'u':
+            if rhs == "u":
                hu = (hu + 3) / 2
-            elif rhs == 'v':
+            elif rhs == "v":
                hv = (hv + 3) / 2
-            elif rhs == 'e':
+            elif rhs == "e":
                he = (he + 3) / 2

-        if binary_op == 'add' or binary_op == 'sub':
+        if binary_op == "add" or binary_op == "sub":
            # op = add, sub
            # lhs range: [-1/2, 1/2]
            # rhs range: [-1/2, 1/2]
@@ -227,9 +248,9 @@ def test_all_binary_builtins():
            hv = hv / 2
            he = he / 2

-        g.ndata['u'] = F.attach_grad(F.clone(hu))
-        g.ndata['v'] = F.attach_grad(F.clone(hv))
-        g.edata['e'] = F.attach_grad(F.clone(he))
+        g.ndata["u"] = F.attach_grad(F.clone(hu))
+        g.ndata["v"] = F.attach_grad(F.clone(hv))
+        g.edata["e"] = F.attach_grad(F.clone(he))

        builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs)
        builtin_msg = getattr(fn, builtin_msg_name)
@@ -245,18 +266,18 @@ def test_all_binary_builtins():
        with F.record_grad():
            if partial:
-                g.pull(nid, builtin_msg(lhs, rhs, 'm'), builtin_red('m', 'r1'))
+                g.pull(nid, builtin_msg(lhs, rhs, "m"), builtin_red("m", "r1"))
            else:
-                g.update_all(builtin_msg(lhs, rhs, 'm'), builtin_red('m', 'r1'))
-            r1 = g.ndata.pop('r1')
+                g.update_all(builtin_msg(lhs, rhs, "m"), builtin_red("m", "r1"))
+            r1 = g.ndata.pop("r1")
            F.backward(F.reduce_sum(r1))
            lhs_grad_1 = F.grad(target_feature_switch(g, lhs))
            rhs_grad_1 = F.grad(target_feature_switch(g, rhs))

        # reset grad
-        g.ndata['u'] = F.attach_grad(F.clone(hu))
-        g.ndata['v'] = F.attach_grad(F.clone(hv))
-        g.edata['e'] = F.attach_grad(F.clone(he))
+        g.ndata["u"] = F.attach_grad(F.clone(hu))
+        g.ndata["v"] = F.attach_grad(F.clone(hv))
+        g.edata["e"] = F.attach_grad(F.clone(he))

        def target_switch(edges, target):
            if target == "u":
@@ -266,7 +287,7 @@ def test_all_binary_builtins():
            elif target == "e":
                return edges.data
            else:
-                assert(0), "Unknown target {}".format(target)
+                assert 0, "Unknown target {}".format(target)

        def mfunc(edges):
            op = getattr(F, binary_op)
@@ -282,15 +303,15 @@ def test_all_binary_builtins():
        def rfunc(nodes):
            op = getattr(F, reducer)
-            return {"r2": op(nodes.mailbox['m'], 1)}
+            return {"r2": op(nodes.mailbox["m"], 1)}

        with F.record_grad():
            if partial:
                g.pull(nid, mfunc, rfunc)
            else:
                g.update_all(mfunc, rfunc)
-            r2 = g.ndata.pop('r2')
-            F.backward(F.reduce_sum(r2), F.tensor([1.]))
+            r2 = g.ndata.pop("r2")
+            F.backward(F.reduce_sum(r2), F.tensor([1.0]))
            lhs_grad_2 = F.grad(target_feature_switch(g, lhs))
            rhs_grad_2 = F.grad(target_feature_switch(g, rhs))
@@ -298,27 +319,32 @@ def test_all_binary_builtins():
        atol = 1e-4

        def _print_error(a, b):
-            print("ERROR: Test {}_{}_{}_{} broadcast: {} partial: {}".
-                  format(lhs, binary_op, rhs, reducer, broadcast, partial))
+            print(
+                "ERROR: Test {}_{}_{}_{} broadcast: {} partial: {}".format(
+                    lhs, binary_op, rhs, reducer, broadcast, partial
+                )
+            )
            return
-            if lhs == 'u':
+            if lhs == "u":
                lhs_data = hu
-            elif lhs == 'v':
+            elif lhs == "v":
                lhs_data = hv
-            elif lhs == 'e':
+            elif lhs == "e":
                lhs_data = he

-            if rhs == 'u':
+            if rhs == "u":
                rhs_data = hu
-            elif rhs == 'v':
+            elif rhs == "v":
                rhs_data = hv
-            elif rhs == 'e':
+            elif rhs == "e":
                rhs_data = he
            print("lhs", F.asnumpy(lhs_data).tolist())
            print("rhs", F.asnumpy(rhs_data).tolist())
-            for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())):
+            for i, (x, y) in enumerate(
+                zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())
+            ):
                if not np.allclose(x, y, rtol, atol):
-                    print('@{} {} v.s. {}'.format(i, x, y))
+                    print("@{} {} v.s. {}".format(i, x, y))

        if not F.allclose(r1, r2, rtol, atol):
            _print_error(r1, r2)
@@ -327,12 +353,12 @@ def test_all_binary_builtins():
        if not F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol):
            print("left grad")
            _print_error(lhs_grad_1, lhs_grad_2)
-        assert(F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol))
+        assert F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol)

        if not F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol):
            print("right grad")
            _print_error(rhs_grad_1, rhs_grad_2)
-        assert(F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol))
+        assert F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol)

    g = dgl.DGLGraph()
    g.add_nodes(20)
@@ -359,20 +385,30 @@ def test_all_binary_builtins():
            for broadcast in ["none", lhs, rhs]:
                for partial in [False, True]:
                    print(lhs, rhs, binary_op, reducer, broadcast, partial)
-                    _test(g, lhs, rhs, binary_op, reducer, partial, nid,
-                          broadcast=broadcast)
+                    _test(
+                        g,
+                        lhs,
+                        rhs,
+                        binary_op,
+                        reducer,
+                        partial,
+                        nid,
+                        broadcast=broadcast,
+                    )
@parametrize_idtype
-@pytest.mark.parametrize('g', get_cases(['homo-zero-degree']))
+@pytest.mark.parametrize("g", get_cases(["homo-zero-degree"]))
def test_mean_zero_degree(g, idtype):
    g = g.astype(idtype).to(F.ctx())
-    g.ndata['h'] = F.ones((g.number_of_nodes(), 3))
-    g.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'x'))
+    g.ndata["h"] = F.ones((g.number_of_nodes(), 3))
+    g.update_all(fn.copy_u("h", "m"), fn.mean("m", "x"))
    deg = F.asnumpy(g.in_degrees())
    v = F.tensor(np.where(deg == 0)[0])
-    assert F.allclose(F.gather_row(g.ndata['x'], v), F.zeros((len(v), 3)))
+    assert F.allclose(F.gather_row(g.ndata["x"], v), F.zeros((len(v), 3)))


-if __name__ == '__main__':
+if __name__ == "__main__":
    test_copy_src_reduce()
    test_copy_edge_reduce()
    test_all_binary_builtins()
+import io
+import pickle
+import unittest
+
+import backend as F
+import dgl
+import dgl.function as fn
import networkx as nx
+import pytest
import scipy.sparse as ssp
-import dgl
+import test_utils
from dgl.graph_index import create_graph_index
from dgl.utils import toindex
-import backend as F
+from test_utils import get_cases, parametrize_idtype
-import dgl.function as fn
-import pickle
-import io
-import unittest, pytest
-import test_utils
-from test_utils import parametrize_idtype, get_cases
from utils import assert_is_identical, assert_is_identical_hetero
def _assert_is_identical_nodeflow(nf1, nf2):
    assert nf1.number_of_nodes() == nf2.number_of_nodes()
    src, dst = nf1.all_edges()
@@ -32,23 +36,29 @@ def _assert_is_identical_nodeflow(nf1, nf2):
        for k in nf1.blocks[i].data:
            assert F.allclose(nf1.blocks[i].data[k], nf2.blocks[i].data[k])


def _assert_is_identical_batchedgraph(bg1, bg2):
    assert_is_identical(bg1, bg2)
    assert bg1.batch_size == bg2.batch_size
    assert bg1.batch_num_nodes == bg2.batch_num_nodes
    assert bg1.batch_num_edges == bg2.batch_num_edges


def _assert_is_identical_batchedhetero(bg1, bg2):
    assert_is_identical_hetero(bg1, bg2)
    for ntype in bg1.ntypes:
        assert bg1.batch_num_nodes(ntype) == bg2.batch_num_nodes(ntype)
    for canonical_etype in bg1.canonical_etypes:
-        assert bg1.batch_num_edges(canonical_etype) == bg2.batch_num_edges(canonical_etype)
+        assert bg1.batch_num_edges(canonical_etype) == bg2.batch_num_edges(
+            canonical_etype
+        )


def _assert_is_identical_index(i1, i2):
    assert i1.slice_data() == i2.slice_data()
    assert F.array_equal(i1.tousertensor(), i2.tousertensor())


def _reconstruct_pickle(obj):
    f = io.BytesIO()
    pickle.dump(obj, f)
@@ -58,11 +68,12 @@ def _reconstruct_pickle(obj):
    return obj
def test_pickling_index():
    # normal index
    i = toindex([1, 2, 3])
    i.tousertensor()
    i.todgltensor()  # construct a dgl tensor which is unpicklable
    i2 = _reconstruct_pickle(i)
    _assert_is_identical_index(i, i2)
@@ -71,6 +82,7 @@ def test_pickling_index():
    i2 = _reconstruct_pickle(i)
    _assert_is_identical_index(i, i2)


def test_pickling_graph_index():
    gi = create_graph_index(None, False)
    gi.add_nodes(3)
@@ -87,53 +99,65 @@ def test_pickling_graph_index():
def _global_message_func(nodes):
-    return {'x': nodes.data['x']}
+    return {"x": nodes.data["x"]}


-@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
+@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
@parametrize_idtype
-@pytest.mark.parametrize('g', get_cases(exclude=['dglgraph', 'two_hetero_batch']))
+@pytest.mark.parametrize(
+    "g", get_cases(exclude=["dglgraph", "two_hetero_batch"])
+)
def test_pickling_graph(g, idtype):
    g = g.astype(idtype)
    new_g = _reconstruct_pickle(g)
    test_utils.check_graph_equal(g, new_g, check_feature=True)


-@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
+@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
def test_pickling_batched_heterograph():
    # copied from test_heterograph.create_test_heterograph()
-    g = dgl.heterograph({
-        ('user', 'follows', 'user'): ([0, 1], [1, 2]),
-        ('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]),
-        ('user', 'wishes', 'game'): ([0, 2], [1, 0]),
-        ('developer', 'develops', 'game'): ([0, 1], [0, 1])
-    })
-    g2 = dgl.heterograph({
-        ('user', 'follows', 'user'): ([0, 1], [1, 2]),
-        ('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]),
-        ('user', 'wishes', 'game'): ([0, 2], [1, 0]),
-        ('developer', 'develops', 'game'): ([0, 1], [0, 1])
-    })
-
-    g.nodes['user'].data['u_h'] = F.randn((3, 4))
-    g.nodes['game'].data['g_h'] = F.randn((2, 5))
-    g.edges['plays'].data['p_h'] = F.randn((4, 6))
-    g2.nodes['user'].data['u_h'] = F.randn((3, 4))
-    g2.nodes['game'].data['g_h'] = F.randn((2, 5))
-    g2.edges['plays'].data['p_h'] = F.randn((4, 6))
+    g = dgl.heterograph(
+        {
+            ("user", "follows", "user"): ([0, 1], [1, 2]),
+            ("user", "plays", "game"): ([0, 1, 2, 1], [0, 0, 1, 1]),
+            ("user", "wishes", "game"): ([0, 2], [1, 0]),
+            ("developer", "develops", "game"): ([0, 1], [0, 1]),
+        }
+    )
+    g2 = dgl.heterograph(
+        {
+            ("user", "follows", "user"): ([0, 1], [1, 2]),
+            ("user", "plays", "game"): ([0, 1, 2, 1], [0, 0, 1, 1]),
+            ("user", "wishes", "game"): ([0, 2], [1, 0]),
+            ("developer", "develops", "game"): ([0, 1], [0, 1]),
+        }
+    )
+
+    g.nodes["user"].data["u_h"] = F.randn((3, 4))
+    g.nodes["game"].data["g_h"] = F.randn((2, 5))
+    g.edges["plays"].data["p_h"] = F.randn((4, 6))
+    g2.nodes["user"].data["u_h"] = F.randn((3, 4))
+    g2.nodes["game"].data["g_h"] = F.randn((2, 5))
+    g2.edges["plays"].data["p_h"] = F.randn((4, 6))

    bg = dgl.batch([g, g2])
    new_bg = _reconstruct_pickle(bg)
    test_utils.check_graph_equal(bg, new_bg)
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU edge_subgraph w/ relabeling not implemented")
@unittest.skipIf(
F._default_context_str == "gpu",
reason="GPU edge_subgraph w/ relabeling not implemented",
)
def test_pickling_subgraph(): def test_pickling_subgraph():
f1 = io.BytesIO() f1 = io.BytesIO()
f2 = io.BytesIO() f2 = io.BytesIO()
g = dgl.rand_graph(10000, 100000) g = dgl.rand_graph(10000, 100000)
g.ndata['x'] = F.randn((10000, 4)) g.ndata["x"] = F.randn((10000, 4))
g.edata['x'] = F.randn((100000, 5)) g.edata["x"] = F.randn((100000, 5))
pickle.dump(g, f1) pickle.dump(g, f1)
sg = g.subgraph([0, 1]) sg = g.subgraph([0, 1])
sgx = sg.ndata['x'] # materialize sgx = sg.ndata["x"] # materialize
pickle.dump(sg, f2) pickle.dump(sg, f2)
# TODO(BarclayII): How should I test that the size of the subgraph pickle file should not # TODO(BarclayII): How should I test that the size of the subgraph pickle file should not
# be as large as the size of the original pickle file? # be as large as the size of the original pickle file?
...@@ -141,38 +165,47 @@ def test_pickling_subgraph(): ...@@ -141,38 +165,47 @@ def test_pickling_subgraph():
f2.seek(0) f2.seek(0)
f2.truncate() f2.truncate()
sgx = sg.edata['x'] # materialize sgx = sg.edata["x"] # materialize
pickle.dump(sg, f2) pickle.dump(sg, f2)
assert f1.tell() > f2.tell() * 50 assert f1.tell() > f2.tell() * 50
f2.seek(0) f2.seek(0)
f2.truncate() f2.truncate()
sg = g.edge_subgraph([0]) sg = g.edge_subgraph([0])
sgx = sg.edata['x'] # materialize sgx = sg.edata["x"] # materialize
pickle.dump(sg, f2) pickle.dump(sg, f2)
assert f1.tell() > f2.tell() * 50 assert f1.tell() > f2.tell() * 50
f2.seek(0) f2.seek(0)
f2.truncate() f2.truncate()
sgx = sg.ndata['x'] # materialize sgx = sg.ndata["x"] # materialize
pickle.dump(sg, f2) pickle.dump(sg, f2)
assert f1.tell() > f2.tell() * 50 assert f1.tell() > f2.tell() * 50
f1.close() f1.close()
f2.close() f2.close()
-@unittest.skipIf(F._default_context_str != 'gpu', reason="Need GPU for pin")
-@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TensorFlow create graph on gpu when unpickle")
+@unittest.skipIf(F._default_context_str != "gpu", reason="Need GPU for pin")
+@unittest.skipIf(
+    dgl.backend.backend_name == "tensorflow",
+    reason="TensorFlow create graph on gpu when unpickle",
+)
@parametrize_idtype
def test_pickling_is_pinned(idtype):
    from copy import deepcopy

    g = dgl.rand_graph(10, 20, idtype=idtype, device=F.cpu())
-    hg = dgl.heterograph({
-        ('user', 'follows', 'user'): ([0, 1], [1, 2]),
-        ('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]),
-        ('user', 'wishes', 'game'): ([0, 2], [1, 0]),
-        ('developer', 'develops', 'game'): ([0, 1], [0, 1])
-    }, idtype=idtype, device=F.cpu())
+    hg = dgl.heterograph(
+        {
+            ("user", "follows", "user"): ([0, 1], [1, 2]),
+            ("user", "plays", "game"): ([0, 1, 2, 1], [0, 0, 1, 1]),
+            ("user", "wishes", "game"): ([0, 2], [1, 0]),
+            ("developer", "develops", "game"): ([0, 1], [0, 1]),
+        },
+        idtype=idtype,
+        device=F.cpu(),
+    )
    for graph in [g, hg]:
        assert not graph.is_pinned()
        graph.pin_memory_()
@@ -186,7 +219,7 @@ def test_pickling_is_pinned(idtype):
        graph.unpin_memory_()


-if __name__ == '__main__':
+if __name__ == "__main__":
    test_pickling_index()
    test_pickling_graph_index()
    test_pickling_frame()
......
import backend as F
-import numpy as np
-from test_utils import parametrize_idtype
import dgl
+import numpy as np
+from test_utils import parametrize_idtype


@parametrize_idtype
......
+import io
+import multiprocessing as mp
+import os
+import pickle
+import unittest
+
+import backend as F
+import dgl
+import dgl.function as fn
import networkx as nx
import scipy.sparse as ssp
-import dgl
from dgl.graph_index import create_graph_index
from dgl.utils import toindex
-import backend as F
-import dgl.function as fn
-import pickle
-import io
-import unittest
from test_utils import parametrize_idtype
-import multiprocessing as mp
-import os


def create_test_graph(idtype):
-    g = dgl.heterograph(({
-        ('user', 'follows', 'user'): ([0, 1], [1, 2]),
-        ('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]),
-        ('user', 'wishes', 'game'): ([0, 2], [1, 0]),
-        ('developer', 'develops', 'game'): ([0, 1], [0, 1])
-    }), idtype=idtype)
+    g = dgl.heterograph(
+        (
+            {
+                ("user", "follows", "user"): ([0, 1], [1, 2]),
+                ("user", "plays", "game"): ([0, 1, 2, 1], [0, 0, 1, 1]),
+                ("user", "wishes", "game"): ([0, 2], [1, 0]),
+                ("developer", "develops", "game"): ([0, 1], [0, 1]),
+            }
+        ),
+        idtype=idtype,
+    )
    return g
def _assert_is_identical_hetero(g, g2):
    assert g.ntypes == g2.ntypes
    assert g.canonical_etypes == g2.canonical_etypes
@@ -35,29 +44,38 @@ def _assert_is_identical_hetero(g, g2):
    # check if edge ID spaces and feature spaces are equal
    for etype in g.canonical_etypes:
-        src, dst = g.all_edges(etype=etype, order='eid')
-        src2, dst2 = g2.all_edges(etype=etype, order='eid')
+        src, dst = g.all_edges(etype=etype, order="eid")
+        src2, dst2 = g2.all_edges(etype=etype, order="eid")
        assert F.array_equal(src, src2)
        assert F.array_equal(dst, dst2)


-@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
+@unittest.skipIf(
+    dgl.backend.backend_name == "tensorflow",
+    reason="Not support tensorflow for now",
+)
@parametrize_idtype
def test_single_process(idtype):
    hg = create_test_graph(idtype=idtype)
    hg_share = hg.shared_memory("hg")
-    hg_rebuild = dgl.hetero_from_shared_memory('hg')
+    hg_rebuild = dgl.hetero_from_shared_memory("hg")
    hg_save_again = hg_rebuild.shared_memory("hg")
    _assert_is_identical_hetero(hg, hg_share)
    _assert_is_identical_hetero(hg, hg_rebuild)
    _assert_is_identical_hetero(hg, hg_save_again)


def sub_proc(hg_origin, name):
    hg_rebuild = dgl.hetero_from_shared_memory(name)
    hg_save_again = hg_rebuild.shared_memory(name)
    _assert_is_identical_hetero(hg_origin, hg_rebuild)
    _assert_is_identical_hetero(hg_origin, hg_save_again)


-@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
+@unittest.skipIf(
+    dgl.backend.backend_name == "tensorflow",
+    reason="Not support tensorflow for now",
+)
@parametrize_idtype
def test_multi_process(idtype):
    hg = create_test_graph(idtype=idtype)
@@ -66,8 +84,14 @@ def test_multi_process(idtype):
    p.start()
    p.join()


-@unittest.skipIf(F._default_context_str == 'cpu', reason="Need gpu for this test")
-@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
+@unittest.skipIf(
+    F._default_context_str == "cpu", reason="Need gpu for this test"
+)
+@unittest.skipIf(
+    dgl.backend.backend_name == "tensorflow",
+    reason="Not support tensorflow for now",
+)
def test_copy_from_gpu():
    hg = create_test_graph(idtype=F.int32)
    hg_gpu = hg.to(F.cuda())
@@ -76,6 +100,7 @@ def test_copy_from_gpu():
    p.start()
    p.join()


# TODO: Test calling shared_memory with Blocks (a subclass of HeteroGraph)
if __name__ == "__main__":
    test_single_process(F.int64)
......
@@ -4,18 +4,18 @@ from collections import Counter
from itertools import product

import backend as F
+import dgl
+import dgl.function as fn
import networkx as nx
import numpy as np
import pytest
import scipy.sparse as ssp
import test_utils
+from dgl import DGLError
from scipy.sparse import rand
from test_utils import get_cases, parametrize_idtype
-import dgl
-import dgl.function as fn
-from dgl import DGLError

rfuncs = {"sum": fn.sum, "max": fn.max, "min": fn.min, "mean": fn.mean}
feat_size = 2
@@ -48,7 +48,6 @@ def create_test_heterograph(idtype):
def create_test_heterograph_2(idtype):
    src = np.random.randint(0, 50, 25)
    dst = np.random.randint(0, 50, 25)
    src1 = np.random.randint(0, 25, 10)
@@ -72,7 +71,6 @@ def create_test_heterograph_2(idtype):
def create_test_heterograph_large(idtype):
    src = np.random.randint(0, 50, 2500)
    dst = np.random.randint(0, 50, 2500)
    g = dgl.heterograph(
@@ -163,7 +161,6 @@ def test_unary_copy_u(idtype):
@parametrize_idtype
def test_unary_copy_e(idtype):
    def _test(mfunc, rfunc):
        g = create_test_heterograph_large(idtype)
        g0 = create_test_heterograph_2(idtype)
        g1 = create_test_heterograph(idtype)
@@ -230,6 +227,7 @@ def test_unary_copy_e(idtype):
        e_grad6 = F.grad(g["plays"].edata["eid"])
        e_grad7 = F.grad(g["wishes"].edata["eid"])
        e_grad8 = F.grad(g["follows"].edata["eid"])

        # # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
@@ -254,7 +252,6 @@ def test_unary_copy_e(idtype):
@parametrize_idtype
def test_binary_op(idtype):
    def _test(lhs, rhs, binary_op, reducer):
        g = create_test_heterograph(idtype)
        x1 = F.randn((g.num_nodes("user"), feat_size))
@@ -309,6 +306,7 @@ def test_binary_op(idtype):
        r2 = g.nodes["game"].data["y"]
        F.backward(r2, F.ones(r2.shape))
        n_grad2 = F.grad(r2)

        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
......