Unverified commit be444e52 authored by Mufei Li, committed by GitHub
Browse files

[Doc/Feature] Refactor, doc update and behavior fix for graphs (#1983)



* Update graph

* Fix for dgl.graph

* from_scipy

* Replace canonical_etypes with relations

* from_networkx

* Update for hetero_from_relations

* Roll back the change of canonical_etypes to relations

* heterograph

* bipartite

* Update doc

* Fix lint

* Fix lint

* Fix test cases

* Fix

* Fix

* Fix

* Fix

* Fix

* Fix

* Update

* Fix test

* Fix

* Update

* Use DGLError

* Update

* Update

* Update

* Update

* Fix

* Fix

* Fix

* Fix

* Fix

* Fix

* Fix

* Fix

* Update

* Fix

* Update

* Fix

* Fix

* Fix

* Update

* Fix

* Update

* Fix

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Fix

* Fix

* Update

* Update

* Update

* Update

* Update

* Update

* rewrite sanity checks

* delete unnecessary checks

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Fix

* Update

* Update

* Update

* Fix

* Fix

* Fix

* Update

* Fix

* Update

* Fix

* Fix

* Update

* Fix

* Update

* Fix
Co-authored-by: xiang song (charlie.song) <classicxsong@gmail.com>
Co-authored-by: Minjie Wang <wmjlyjemaine@gmail.com>
Co-authored-by: Quan Gan <coin2028@hotmail.com>
parent 0afc3cf8
......@@ -106,7 +106,7 @@ class TAGConv(gluon.Block):
is size of output feature.
"""
with graph.local_scope():
assert graph.is_homogeneous(), 'Graph is not homogeneous'
assert graph.is_homogeneous, 'Graph is not homogeneous'
degs = graph.in_degrees().astype('float32')
norm = mx.nd.power(mx.nd.clip(degs, a_min=1, a_max=float("inf")), -0.5)
......
......@@ -136,8 +136,9 @@ class GatedGraphConv(nn.Module):
is the output feature size.
"""
with graph.local_scope():
assert graph.is_homogeneous(), \
"not a homograph; convert it with to_homo and pass in the edge type as argument"
assert graph.is_homogeneous, \
"not a homogeneous graph; convert it with to_homogeneous " \
"and pass in the edge type as argument"
assert etypes.min() >= 0 and etypes.max() < self._n_etypes, \
"edge type indices out of range [0, {})".format(self._n_etypes)
zero_pad = feat.new_zeros((feat.shape[0], self._out_feats - feat.shape[1]))
......
......@@ -111,7 +111,7 @@ class TAGConv(nn.Module):
is size of output feature.
"""
with graph.local_scope():
assert graph.is_homogeneous(), 'Graph is not homogeneous'
assert graph.is_homogeneous, 'Graph is not homogeneous'
norm = th.pow(graph.in_degrees().float().clamp(min=1), -0.5)
shp = norm.shape + (1,) * (feat.dim() - 1)
......
......@@ -270,8 +270,9 @@ class RelGraphConv(layers.Layer):
tf.Tensor
New node features.
"""
assert g.is_homogeneous(), \
"not a homograph; convert it with to_homo and pass in the edge type as argument"
assert g.is_homogeneous, \
"not a homogeneous graph; convert it with to_homogeneous " \
"and pass in the edge type as argument"
with g.local_scope():
g.ndata['h'] = x
g.edata['type'] = tf.cast(etypes, tf.int64)
......
......@@ -50,7 +50,7 @@ def segment_reduce(seglen, value, reducer='sum'):
if len(u) != len(v):
raise DGLError("Invalid seglen array:", seglen,
". Its summation must be equal to value.shape[0].")
g = convert.bipartite((u, v))
g = convert.heterograph({('_U', '_E', '_V'): (u, v)})
g.srcdata['h'] = value
g.update_all(fn.copy_u('h', 'm'), getattr(fn, reducer)('m', 'h'))
return g.dstdata['h']
......
......@@ -114,8 +114,10 @@ class RandomWalkNeighborSampler(object):
dst = F.boolean_mask(dst, src_mask)
# count the number of visits and pick the K-most frequent neighbors for each node
neighbor_graph = convert.graph(
(src, dst), num_nodes=self.G.number_of_nodes(self.ntype), ntype=self.ntype)
neighbor_graph = convert.heterograph(
{(self.ntype, '_E', self.ntype): (src, dst)},
{self.ntype: self.G.number_of_nodes(self.ntype)}
)
neighbor_graph = transform.to_simple(neighbor_graph, return_counts=self.weight_column)
counts = neighbor_graph.edata[self.weight_column]
neighbor_graph = select_topk(neighbor_graph, self.num_neighbors, self.weight_column)
......@@ -176,8 +178,8 @@ class PinSAGESampler(RandomWalkNeighborSampler):
>>> g = scipy.sparse.random(3000, 5000, 0.003)
>>> G = dgl.heterograph({
... ('A', 'AB', 'B'): g,
... ('B', 'BA', 'A'): g.T})
... ('A', 'AB', 'B'): g.nonzero(),
... ('B', 'BA', 'A'): g.T.nonzero()})
Then we create a PinSage neighbor sampler that samples a graph of node type "A". Each
node would have (a maximum of) 10 neighbors.
......
......@@ -80,8 +80,7 @@ def random_walk(g, nodes, *, metapath=None, length=None, prob=None, restart_prob
Examples
--------
The following creates a homogeneous graph:
>>> g1 = dgl.graph([(0, 1), (1, 2), (1, 3), (2, 0), (3, 0)], 'user', 'follow')
>>> g1 = dgl.graph(([0, 1, 1, 2, 3], [1, 2, 3, 0, 0]))
Normal random walk:
......@@ -93,7 +92,7 @@ def random_walk(g, nodes, *, metapath=None, length=None, prob=None, restart_prob
The first tensor indicates the random walk path for each seed node.
The j-th element in the second tensor indicates the node type ID of the j-th node
in every path. In this case, it is returning all 0 (``user``).
in every path. In this case, it is returning all 0.
Random walk with restart:
......@@ -115,9 +114,9 @@ def random_walk(g, nodes, *, metapath=None, length=None, prob=None, restart_prob
Metapath-based random walk:
>>> g2 = dgl.heterograph({
... ('user', 'follow', 'user'): [(0, 1), (1, 2), (1, 3), (2, 0), (3, 0)],
... ('user', 'view', 'item'): [(0, 0), (0, 1), (1, 1), (2, 2), (3, 2), (3, 1)],
... ('item', 'viewed-by', 'user'): [(0, 0), (1, 0), (1, 1), (2, 2), (2, 3), (1, 3)]})
... ('user', 'follow', 'user'): ([0, 1, 1, 2, 3], [1, 2, 3, 0, 0]),
... ('user', 'view', 'item'): ([0, 0, 1, 2, 3, 3], [0, 1, 1, 2, 2, 1]),
... ('item', 'viewed-by', 'user'): ([0, 1, 1, 2, 2, 1], [0, 0, 1, 2, 3, 3])})
>>> dgl.sampling.random_walk(
... g2, [0, 1, 2, 0], metapath=['follow', 'view', 'viewed-by'] * 2)
(tensor([[0, 1, 1, 1, 2, 2, 3],
......@@ -215,9 +214,9 @@ def pack_traces(traces, types):
Examples
--------
>>> g2 = dgl.heterograph({
... ('user', 'follow', 'user'): [(0, 1), (1, 2), (1, 3), (2, 0), (3, 0)],
... ('user', 'view', 'item'): [(0, 0), (0, 1), (1, 1), (2, 2), (3, 2), (3, 1)],
... ('item', 'viewed-by', 'user'): [(0, 0), (1, 0), (1, 1), (2, 2), (2, 3), (1, 3)]})
... ('user', 'follow', 'user'): ([0, 1, 1, 2, 3], [1, 2, 3, 0, 0]),
... ('user', 'view', 'item'): ([0, 0, 1, 2, 3, 3], [0, 1, 1, 2, 2, 1]),
... ('item', 'viewed-by', 'user'): ([0, 1, 1, 2, 2, 1], [0, 0, 1, 2, 3, 3])})
>>> traces, types = dgl.sampling.random_walk(
... g2, [0, 0], metapath=['follow', 'view', 'viewed-by'] * 2,
... restart_prob=torch.FloatTensor([0, 0.5, 0, 0, 0.5, 0]))
......
......@@ -60,8 +60,9 @@ def node_subgraph(graph, nodes):
Instantiate a heterograph.
>>> g = dgl.heterograph({
... ('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
... ('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2])})
>>> ('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
>>> ('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2])
>>> })
>>> # Set node features
>>> g.nodes['user'].data['h'] = torch.tensor([[0.], [1.], [2.]])
......@@ -119,7 +120,11 @@ def node_subgraph(graph, nodes):
return F.astype(F.nonzero_1d(F.copy_to(v, graph.device)), graph.idtype)
else:
return utils.prepare_tensor(graph, v, 'nodes["{}"]'.format(ntype))
induced_nodes = [_process_nodes(ntype, nodes.get(ntype, [])) for ntype in graph.ntypes]
induced_nodes = []
for ntype in graph.ntypes:
nids = nodes.get(ntype, F.copy_to(F.tensor([], graph.idtype), graph.device))
induced_nodes.append(_process_nodes(ntype, nids))
sgi = graph._graph.node_subgraph(induced_nodes)
induced_edges = sgi.induced_edges
return _create_hetero_subgraph(graph, sgi, induced_nodes, induced_edges)
......@@ -176,8 +181,9 @@ def edge_subgraph(graph, edges, preserve_nodes=False):
Instantiate a heterograph.
>>> g = dgl.heterograph({
... ('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
... ('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2])})
>>> ('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
>>> ('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2])
>>> })
>>> # Set edge features
>>> g.edges['follows'].data['h'] = torch.tensor([[0.], [1.], [2.]])
......@@ -238,9 +244,10 @@ def edge_subgraph(graph, edges, preserve_nodes=False):
return utils.prepare_tensor(graph, e, 'edges["{}"]'.format(etype))
edges = {graph.to_canonical_etype(etype): e for etype, e in edges.items()}
induced_edges = [
_process_edges(cetype, edges.get(cetype, []))
for cetype in graph.canonical_etypes]
induced_edges = []
for cetype in graph.canonical_etypes:
eids = edges.get(cetype, F.copy_to(F.tensor([], graph.idtype), graph.device))
induced_edges.append(_process_edges(cetype, eids))
sgi = graph._graph.edge_subgraph(induced_edges, preserve_nodes)
induced_nodes = sgi.induced_nodes
return _create_hetero_subgraph(graph, sgi, induced_nodes, induced_edges)
......@@ -452,8 +459,9 @@ def node_type_subgraph(graph, ntypes):
Instantiate a heterograph.
>>> g = dgl.heterograph({
... ('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
... ('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2])})
>>> ('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
>>> ('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2])
>>> })
>>> # Set node features
>>> g.nodes['user'].data['h'] = torch.tensor([[0.], [1.], [2.]])
......@@ -519,8 +527,9 @@ def edge_type_subgraph(graph, etypes):
Instantiate a heterograph.
>>> g = dgl.heterograph({
... ('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
... ('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2])})
>>> ('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
>>> ('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2])
>>> })
>>> # Set edge features
>>> g.edges['follows'].data['h'] = torch.tensor([[0.], [1.], [2.]])
......@@ -549,7 +558,7 @@ def edge_type_subgraph(graph, etypes):
node_type_subgraph
"""
etype_ids = [graph.get_etype_id(etype) for etype in etypes]
# meta graph is homograph, still using int64
# meta graph is homogeneous graph, still using int64
meta_src, meta_dst, _ = graph._graph.metagraph.find_edges(utils.toindex(etype_ids, "int64"))
rel_graphs = [graph._graph.get_relation_graph(i) for i in etype_ids]
meta_src = meta_src.tonumpy()
......
......@@ -148,7 +148,7 @@ def knn_graph(x, k):
(F.asnumpy(F.zeros_like(dst) + 1), (F.asnumpy(dst), F.asnumpy(src))),
shape=(n_samples * n_points, n_samples * n_points))
return convert.graph(adj)
return convert.from_scipy(adj)
#pylint: disable=invalid-name
def segmented_knn_graph(x, k, segs):
......@@ -220,8 +220,7 @@ def segmented_knn_graph(x, k, segs):
src = F.reshape(src, (-1,))
adj = sparse.csr_matrix((F.asnumpy(F.zeros_like(dst) + 1), (F.asnumpy(dst), F.asnumpy(src))))
g = convert.graph(adj)
return g
return convert.from_scipy(adj)
def to_bidirected(g, readonly=None, copy_ndata=False):
r"""Convert the graph to a bidirectional simple graph, adding reverse edges and
......@@ -373,7 +372,7 @@ def add_reverse_edges(g, readonly=None, copy_ndata=True,
--------
**Homogeneous graphs**
>>> g = dgl.graph(th.tensor([0, 0]), th.tensor([0, 1]))
>>> g = dgl.graph((th.tensor([0, 0]), th.tensor([0, 1])))
>>> bg1 = dgl.add_reverse_edges(g)
>>> bg1.edges()
(tensor([0, 0, 0, 1]), tensor([0, 1, 0, 0]))
......@@ -381,10 +380,10 @@ def add_reverse_edges(g, readonly=None, copy_ndata=True,
**Heterogeneous graphs with Multiple Edge Types**
>>> g = dgl.heterograph({
... ('user', 'wins', 'user'): (th.tensor([0, 2, 0, 2, 2]), th.tensor([1, 1, 2, 1, 0])),
... ('user', 'plays', 'game'): (th.tensor([1, 2, 1]), th.tensor([2, 1, 1])),
... ('user', 'follows', 'user'): (th.tensor([1, 2, 1), th.tensor([0, 0, 0]))
... })
>>> ('user', 'wins', 'user'): (th.tensor([0, 2, 0, 2, 2]), th.tensor([1, 1, 2, 1, 0])),
>>> ('user', 'plays', 'game'): (th.tensor([1, 2, 1]), th.tensor([2, 1, 1])),
>>> ('user', 'follows', 'user'): (th.tensor([1, 2, 1]), th.tensor([0, 0, 0]))
>>> })
>>> g.nodes['game'].data['hv'] = th.ones(3, 1)
>>> g.edges['wins'].data['h'] = th.tensor([0, 1, 2, 3, 4])
......@@ -521,7 +520,7 @@ def line_graph(g, backtracking=True, shared=False):
>>> lg.edges()
(tensor([0, 1, 2, 4]), tensor([4, 0, 3, 1]))
"""
assert g.is_homogeneous(), \
assert g.is_homogeneous, \
'only homogeneous graph is supported'
dev = g.device
......@@ -572,7 +571,7 @@ def khop_adj(g, k):
[1., 3., 3., 1., 0.],
[0., 1., 3., 3., 1.]])
"""
assert g.is_homogeneous(), \
assert g.is_homogeneous, \
'only homogeneous graph is supported'
adj_k = g.adj(scipy_fmt=g.formats()['created'][0]) ** k
return F.tensor(adj_k.todense().astype(np.float32))
......@@ -637,7 +636,7 @@ def khop_graph(g, k, copy_ndata=True):
ndata_schemes={}
edata_schemes={})
"""
assert g.is_homogeneous(), \
assert g.is_homogeneous, \
'only homogeneous graph is supported'
n = g.number_of_nodes()
adj_k = g.adj(transpose=True, scipy_fmt=g.formats()['created'][0]) ** k
......@@ -958,12 +957,9 @@ def metapath_reachable_graph(g, metapath):
adj = (adj != 0).tocsr()
srctype = g.to_canonical_etype(metapath[0])[0]
dsttype = g.to_canonical_etype(metapath[-1])[2]
if srctype == dsttype:
assert adj.shape[0] == adj.shape[1]
new_g = convert.graph(adj, ntype=srctype, idtype=g.idtype, device=g.device)
else:
new_g = convert.bipartite(adj, utype=srctype, vtype=dsttype,
idtype=g.idtype, device=g.device)
new_g = convert.heterograph({(srctype, '_E', dsttype): adj.nonzero()},
{srctype: adj.shape[0], dsttype: adj.shape[1]},
idtype=g.idtype, device=g.device)
# copy srcnode features
new_g.nodes[srctype].data.update(g.nodes[srctype].data)
......@@ -1516,7 +1512,8 @@ def compact_graphs(graphs, always_preserve=None, copy_ndata=True, copy_edata=Tru
The following code constructs a bipartite graph with 20 users and 10 games, but
only user #1 and #3, as well as game #3 and #5, have connections:
>>> g = dgl.bipartite([(1, 3), (3, 5)], 'user', 'plays', 'game', num_nodes=(20, 10))
>>> g = dgl.heterograph({('user', 'plays', 'game'): ([1, 3], [3, 5])},
>>> {'user': 20, 'game': 10})
The following would compact the graph above to another bipartite graph with only
two users and two games.
......@@ -1538,7 +1535,8 @@ def compact_graphs(graphs, always_preserve=None, copy_ndata=True, copy_edata=Tru
of the given graphs are removed. So if you compact ``g`` and the following ``g2``
graphs together:
>>> g2 = dgl.bipartite([(1, 6), (6, 8)], 'user', 'plays', 'game', num_nodes=(20, 10))
>>> g2 = dgl.heterograph({('user', 'plays', 'game'): ([1, 6], [6, 8])},
>>> {'user': 20, 'game': 10})
>>> (new_g, new_g2), induced_nodes = dgl.compact_graphs([g, g2])
>>> induced_nodes
{'user': tensor([1, 3, 6]), 'game': tensor([3, 5, 6, 8])}
......@@ -1671,8 +1669,7 @@ def to_block(g, dst_nodes=None, include_dst_in_src=True):
Examples
--------
Converting a homogeneous graph to a block as described above:
>>> g = dgl.graph([(0, 1), (1, 2), (2, 3)])
>>> g = dgl.graph(([0, 1, 2], [1, 2, 3]))
>>> block = dgl.to_block(g, torch.LongTensor([3, 2]))
The output nodes would be exactly the same as the ones given: [3, 2].
......@@ -1708,7 +1705,7 @@ def to_block(g, dst_nodes=None, include_dst_in_src=True):
Converting a heterogeneous graph to a block is similar, except that when specifying
the output nodes, you have to give a dict:
>>> g = dgl.bipartite([(0, 1), (1, 2), (2, 3)], utype='A', vtype='B')
>>> g = dgl.heterograph({('A', '_E', 'B'): ([0, 1, 2], [1, 2, 3])})
If you don't specify any node of type A on the output side, the node type ``A``
in the block would have zero nodes on the output side.
......
......@@ -36,7 +36,7 @@ def bfs_nodes_generator(graph, source, reverse=False):
/ \\
0 - 1 - 3 - 5
>>> g = dgl.graph([(0, 1), (1, 2), (1, 3), (2, 3), (2, 4), (3, 5)])
>>> g = dgl.graph(([0, 1, 1, 2, 2, 3], [1, 2, 3, 3, 4, 5]))
>>> list(dgl.bfs_nodes_generator(g, 0))
[tensor([0]), tensor([1]), tensor([2, 3]), tensor([4, 5])]
"""
......@@ -80,7 +80,7 @@ def bfs_edges_generator(graph, source, reverse=False):
/ \\
0 - 1 - 3 - 5
>>> g = dgl.graph([(0, 1), (1, 2), (1, 3), (2, 3), (2, 4), (3, 5)])
>>> g = dgl.graph(([0, 1, 1, 2, 2, 3], [1, 2, 3, 3, 4, 5]))
>>> list(dgl.bfs_edges_generator(g, 0))
[tensor([0]), tensor([1, 2]), tensor([4, 5])]
"""
......@@ -121,7 +121,7 @@ def topological_nodes_generator(graph, reverse=False):
/ \\
0 - 1 - 3 - 5
>>> g = dgl.graph([(0, 1), (1, 2), (1, 3), (2, 3), (2, 4), (3, 5)])
>>> g = dgl.graph(([0, 1, 1, 2, 2, 3], [1, 2, 3, 3, 4, 5]))
>>> list(dgl.topological_nodes_generator(g))
[tensor([0]), tensor([1]), tensor([2]), tensor([3, 4]), tensor([5])]
"""
......@@ -169,7 +169,7 @@ def dfs_edges_generator(graph, source, reverse=False):
Edge addition order [(0, 1), (1, 2), (1, 3), (2, 3), (2, 4), (3, 5)]
>>> g = dgl.graph([(0, 1), (1, 2), (1, 3), (2, 3), (2, 4), (3, 5)])
>>> g = dgl.graph(([0, 1, 1, 2, 2, 3], [1, 2, 3, 3, 4, 5]))
>>> list(dgl.dfs_edges_generator(g, 0))
[tensor([0]), tensor([1]), tensor([3]), tensor([5]), tensor([4])]
"""
......@@ -243,7 +243,7 @@ def dfs_labeled_edges_generator(
Edge addition order [(0, 1), (1, 2), (1, 3), (2, 3), (2, 4), (3, 5)]
>>> g = dgl.graph([(0, 1), (1, 2), (1, 3), (2, 3), (2, 4), (3, 5)])
>>> g = dgl.graph(([0, 1, 1, 2, 2, 3], [1, 2, 3, 3, 4, 5]))
>>> list(dgl.dfs_labeled_edges_generator(g, 0, has_nontree_edge=True))
(tensor([0]), tensor([1]), tensor([3]), tensor([5]), tensor([4]), tensor([2])),
(tensor([0]), tensor([0]), tensor([0]), tensor([0]), tensor([0]), tensor([2]))
......
......@@ -27,7 +27,6 @@ def prepare_tensor(g, data, name):
Tensor
Data in tensor object.
"""
ret = None
if F.is_tensor(data):
if F.dtype(data) != g.idtype or F.context(data) != g.device:
raise DGLError('Expect argument "{}" to have data type {} and device '
......@@ -35,9 +34,15 @@ def prepare_tensor(g, data, name):
name, g.idtype, g.device, F.dtype(data), F.context(data)))
ret = data
else:
ret = F.copy_to(F.tensor(data, g.idtype), g.device)
if F.ndim(ret) != 1:
data = F.tensor(data)
if F.dtype(data) not in (F.int32, F.int64):
raise DGLError('Expect argument "{}" to have data type int32 or int64,'
' but got {}.'.format(name, F.dtype(data)))
ret = F.copy_to(F.astype(data, g.idtype), g.device)
if F.ndim(ret) == 0:
ret = F.unsqueeze(ret, 0)
if F.ndim(ret) > 1:
raise DGLError('Expect a 1-D tensor for argument "{}". But got {}.'.format(
name, ret))
return ret
......@@ -158,3 +163,15 @@ def check_all_same_schema(feat_dict_list, keys, name):
' and feature size, but got\n\t{} {}\nand\n\t{} {}.'.format(
name, k, F.dtype(t1), F.shape(t1)[1:],
F.dtype(t2), F.shape(t2)[1:]))
def check_valid_idtype(idtype):
    """Validate the value of the idtype argument.

    Parameters
    ----------
    idtype : data type
        The framework object of a data type.

    Raises
    ------
    DGLError
        If ``idtype`` is neither ``None`` nor the framework's
        int32/int64 data-type object.
    """
    # None is accepted so callers can request the default idtype.
    accepted = (None, F.int32, F.int64)
    if idtype in accepted:
        return
    raise DGLError('Expect idtype to be a framework object of int32/int64, '
                   'got {}'.format(idtype))
......@@ -5,6 +5,7 @@ import networkx as nx
from ..base import DGLError
from .. import backend as F
from . import checks
def elist2tensor(elist, idtype):
"""Function to convert an edge list to edge tensors.
......@@ -49,7 +50,7 @@ def scipy2tensor(spmat, idtype):
col = F.tensor(spmat.col, idtype)
return row, col
def networkx2tensor(nx_graph, idtype, edge_id_attr_name='id'):
def networkx2tensor(nx_graph, idtype, edge_id_attr_name=None):
"""Function to convert a networkx graph to edge tensors.
Parameters
......@@ -60,7 +61,7 @@ def networkx2tensor(nx_graph, idtype, edge_id_attr_name='id'):
Integer ID type. Must be int32 or int64.
edge_id_attr_name : str, optional
Key name for edge ids in the NetworkX graph. If not found, we
will consider the graph not to have pre-specified edge ids. (Default: 'id')
will consider the graph not to have pre-specified edge ids. (Default: None)
Returns
-------
......@@ -72,19 +73,17 @@ def networkx2tensor(nx_graph, idtype, edge_id_attr_name='id'):
# Relabel nodes using consecutive integers
nx_graph = nx.convert_node_labels_to_integers(nx_graph, ordering='sorted')
# nx_graph.edges(data=True) returns src, dst, attr_dict
if nx_graph.number_of_edges() > 0:
has_edge_id = edge_id_attr_name in next(iter(nx_graph.edges(data=True)))[-1]
else:
has_edge_id = False
has_edge_id = edge_id_attr_name is not None
if has_edge_id:
num_edges = nx_graph.number_of_edges()
src = [0] * num_edges
dst = [0] * num_edges
for u, v, attr in nx_graph.edges(data=True):
eid = attr[edge_id_attr_name]
eid = int(attr[edge_id_attr_name])
if eid < 0 or eid >= nx_graph.number_of_edges():
raise DGLError('Expect edge IDs to be a non-negative integer smaller than {:d}, '
'got {:d}'.format(num_edges, eid))
src[eid] = u
dst[eid] = v
else:
......@@ -97,7 +96,7 @@ def networkx2tensor(nx_graph, idtype, edge_id_attr_name='id'):
dst = F.tensor(dst, idtype)
return src, dst
def graphdata2tensors(data, idtype=None, bipartite=False):
def graphdata2tensors(data, idtype=None, bipartite=False, **kwargs):
"""Function to convert various types of data to edge tensors and infer
the number of nodes.
......@@ -111,6 +110,14 @@ def graphdata2tensors(data, idtype=None, bipartite=False):
bipartite : bool, optional
Whether infer number of nodes of a bipartite graph --
num_src and num_dst can be different.
kwargs
- edge_id_attr_name : The name (str) of the edge attribute that stores the edge
IDs in the NetworkX graph.
- top_map : The dictionary mapping the original IDs of the source nodes to the
new ones.
- bottom_map : The dictionary mapping the original IDs of the destination nodes
to the new ones.
Returns
-------
......@@ -127,19 +134,62 @@ def graphdata2tensors(data, idtype=None, bipartite=False):
# preferred default idtype is int64
# if data is tensor and idtype is None, infer the idtype from tensor
idtype = F.int64
checks.check_valid_idtype(idtype)
if isinstance(data, tuple) and (not F.is_tensor(data[0]) or not F.is_tensor(data[1])):
# (Iterable, Iterable) type data, convert it to (Tensor, Tensor)
if len(data[0]) == 0:
# force idtype for empty list
data = F.tensor(data[0], idtype), F.tensor(data[1], idtype)
else:
# convert the iterable to tensor and keep its native data type so we can check
# its validity later
data = F.tensor(data[0]), F.tensor(data[1])
if isinstance(data, tuple):
src, dst = F.tensor(data[0], idtype), F.tensor(data[1], idtype)
# (Tensor, Tensor) type data
src, dst = data
# sanity checks
# TODO(minjie): move these checks to C for faster graph construction.
if F.dtype(src) != F.dtype(dst):
raise DGLError('Expect the source and destination node IDs to have the same type,'
' but got {} and {}.'.format(F.dtype(src), F.dtype(dst)))
if F.context(src) != F.context(dst):
raise DGLError('Expect the source and destination node IDs to be on the same device,'
' but got {} and {}.'.format(F.context(src), F.context(dst)))
if F.dtype(src) not in (F.int32, F.int64):
raise DGLError('Expect the source ID tensor to have data type int32 or int64,'
' but got {}.'.format(F.dtype(src)))
if F.dtype(dst) not in (F.int32, F.int64):
raise DGLError('Expect the destination ID tensor to have data type int32 or int64,'
' but got {}.'.format(F.dtype(dst)))
if idtype is not None:
src, dst = F.astype(src, idtype), F.astype(dst, idtype)
elif isinstance(data, list):
src, dst = elist2tensor(data, idtype)
elif isinstance(data, sp.sparse.spmatrix):
src, dst = scipy2tensor(data, idtype)
elif isinstance(data, nx.Graph):
edge_id_attr_name = kwargs.get('edge_id_attr_name', None)
if bipartite:
src, dst = networkxbipartite2tensors(data, idtype)
top_map = kwargs.get('top_map')
bottom_map = kwargs.get('bottom_map')
src, dst = networkxbipartite2tensors(
data, idtype, top_map=top_map,
bottom_map=bottom_map, edge_id_attr_name=edge_id_attr_name)
else:
src, dst = networkx2tensor(data, idtype)
src, dst = networkx2tensor(
data, idtype, edge_id_attr_name=edge_id_attr_name)
else:
raise DGLError('Unsupported graph data type:', type(data))
if len(src) != len(dst):
raise DGLError('Expect the source and destination ID tensors to have the same length,'
' but got {} and {}.'.format(len(src), len(dst)))
if len(src) > 0 and (F.as_scalar(F.min(src, 0)) < 0 or F.as_scalar(F.min(dst, 0)) < 0):
raise DGLError('All IDs must be non-negative integers.')
# infer number of nodes
infer_from_raw = infer_num_nodes(data, bipartite=bipartite)
if infer_from_raw is None:
num_src, num_dst = infer_num_nodes((src, dst), bipartite=bipartite)
......@@ -147,7 +197,7 @@ def graphdata2tensors(data, idtype=None, bipartite=False):
num_src, num_dst = infer_from_raw
return src, dst, num_src, num_dst
def networkxbipartite2tensors(nx_graph, idtype, edge_id_attr_name='id'):
def networkxbipartite2tensors(nx_graph, idtype, top_map, bottom_map, edge_id_attr_name=None):
"""Function to convert a networkx bipartite to edge tensors.
Parameters
......@@ -155,49 +205,54 @@ def networkxbipartite2tensors(nx_graph, idtype, edge_id_attr_name='id'):
nx_graph : nx.Graph
NetworkX graph. It must follow the bipartite graph convention of networkx.
Each node has an attribute ``bipartite`` with values 0 and 1 indicating
which set it belongs to. Only edges from node set 0 to node set 1 are
added to the returned graph.
which set it belongs to.
top_map : dict
The dictionary mapping the original node labels to the node IDs for the source type.
bottom_map : dict
The dictionary mapping the original node labels to the node IDs for the destination type.
idtype : int32, int64, optional
Integer ID type. Must be int32 or int64.
edge_id_attr_name : str, optional
Key name for edge ids in the NetworkX graph. If not found, we
will consider the graph not to have pre-specified edge ids. (Default: 'id')
will consider the graph not to have pre-specified edge ids. (Default: None)
Returns
-------
(Tensor, Tensor)
Edge tensors.
"""
if not nx_graph.is_directed():
nx_graph = nx_graph.to_directed()
top_nodes = {n for n, d in nx_graph.nodes(data=True) if d['bipartite'] == 0}
bottom_nodes = set(nx_graph) - top_nodes
top_nodes = sorted(top_nodes)
bottom_nodes = sorted(bottom_nodes)
top_map = {n : i for i, n in enumerate(top_nodes)}
bottom_map = {n : i for i, n in enumerate(bottom_nodes)}
if nx_graph.number_of_edges() > 0:
has_edge_id = edge_id_attr_name in next(iter(nx_graph.edges(data=True)))[-1]
else:
has_edge_id = False
has_edge_id = edge_id_attr_name is not None
if has_edge_id:
num_edges = nx_graph.number_of_edges()
src = [0] * num_edges
dst = [0] * num_edges
for u, v, attr in nx_graph.edges(data=True):
eid = attr[edge_id_attr_name]
if u not in top_map:
raise DGLError('Expect the node {} to have attribute bipartite=0 '
'with edge {}'.format(u, (u, v)))
if v not in bottom_map:
raise DGLError('Expect the node {} to have attribute bipartite=1 '
'with edge {}'.format(v, (u, v)))
eid = int(attr[edge_id_attr_name])
if eid < 0 or eid >= nx_graph.number_of_edges():
raise DGLError('Expect edge IDs to be a non-negative integer smaller than {:d}, '
'got {:d}'.format(num_edges, eid))
src[eid] = top_map[u]
dst[eid] = bottom_map[v]
else:
src = []
dst = []
for e in nx_graph.edges:
if e[0] in top_map:
src.append(top_map[e[0]])
dst.append(bottom_map[e[1]])
u, v = e[0], e[1]
if u not in top_map:
raise DGLError('Expect the node {} to have attribute bipartite=0 '
'with edge {}'.format(u, (u, v)))
if v not in bottom_map:
raise DGLError('Expect the node {} to have attribute bipartite=1 '
'with edge {}'.format(v, (u, v)))
src.append(top_map[u])
dst.append(bottom_map[v])
src = F.tensor(src, dtype=idtype)
dst = F.tensor(dst, dtype=idtype)
return src, dst
......
......@@ -420,7 +420,7 @@ FlattenedHeteroGraphPtr HeteroGraph::FlattenImpl(const std::vector<dgl_type_t>&
dsttype_set.push_back(dsttype);
}
}
// Sort the node types so that we can compare the sets and decide whether a homograph
// Sort the node types so that we can compare the sets and decide whether a homogeneous graph
// should be returned.
std::sort(srctype_set.begin(), srctype_set.end());
std::sort(dsttype_set.begin(), dsttype_set.end());
......
......@@ -52,7 +52,7 @@ HaloHeteroSubgraph GetSubgraphWithHalo(std::shared_ptr<HeteroGraph> hg,
IdArray nodes, int num_hops) {
CHECK_EQ(hg->NumBits(), 64) << "halo subgraph only supports 64bits graph";
CHECK_EQ(hg->relation_graphs().size(), 1)
<< "halo subgraph only supports homograph";
<< "halo subgraph only supports homogeneous graph";
CHECK_EQ(nodes->dtype.bits, 64)
<< "halo subgraph only supports 64bits nodes tensor";
const dgl_id_t *nid = static_cast<dgl_id_t *>(nodes->data);
......
......@@ -145,30 +145,19 @@ def test_batch_setter_getter(idtype):
truth = [0.] * 17
truth[0] = truth[4] = truth[3] = truth[9] = truth[16] = 1.
assert _pfc(g.edata['l']) == truth
# set partial edges (many-one)
u = F.tensor([3, 4, 6], g.idtype)
v = F.tensor([9], g.idtype)
v = F.tensor([9, 9, 9], g.idtype)
g.edges[u, v].data['l'] = F.ones((3, D))
truth[5] = truth[7] = truth[11] = 1.
assert _pfc(g.edata['l']) == truth
# set partial edges (one-many)
u = F.tensor([0], g.idtype)
u = F.tensor([0, 0, 0], g.idtype)
v = F.tensor([4, 5, 6], g.idtype)
g.edges[u, v].data['l'] = F.ones((3, D))
truth[6] = truth[8] = truth[10] = 1.
assert _pfc(g.edata['l']) == truth
# get partial edges (many-many)
u = F.tensor([0, 6, 0], g.idtype)
v = F.tensor([6, 9, 7], g.idtype)
assert _pfc(g.edges[u, v].data['l']) == [1., 1., 0.]
# get partial edges (many-one)
u = F.tensor([5, 6, 7], g.idtype)
v = F.tensor([9], g.idtype)
assert _pfc(g.edges[u, v].data['l']) == [1., 1., 0.]
# get partial edges (one-many)
u = F.tensor([0], g.idtype)
v = F.tensor([3, 4, 5], g.idtype)
assert _pfc(g.edges[u, v].data['l']) == [1., 1., 1.]
assert _pfc(g.edges[u, v].data['l']) == [1.0, 1.0, 0.0]
@parametrize_dtype
def test_batch_setter_autograd(idtype):
......@@ -221,9 +210,7 @@ def _test_nx_conversion():
n3 = F.randn((5, 4))
e1 = F.randn((4, 5))
e2 = F.randn((4, 7))
g = DGLGraph()
g.add_nodes(5)
g.add_edges([0,1,3,4], [2,4,0,3])
g = dgl.graph(([0, 1, 3, 4], [2, 4, 0, 3]))
g.ndata.update({'n1': n1, 'n2': n2, 'n3': n3})
g.edata.update({'e1': e1, 'e2': e2})
......@@ -369,7 +356,7 @@ def test_update_routines(idtype):
@parametrize_dtype
def test_update_all_0deg(idtype):
# test#1
g = dgl.graph([(1,0), (2,0), (3,0), (4,0)], idtype=idtype, device=F.ctx())
g = dgl.graph(([1, 2, 3, 4], [0, 0, 0, 0]), idtype=idtype, device=F.ctx())
def _message(edges):
return {'m' : edges.src['h']}
def _reduce(nodes):
......@@ -390,7 +377,7 @@ def test_update_all_0deg(idtype):
assert F.allclose(new_repr[0], 2 * F.sum(old_repr, 0))
# test#2: graph with no edge
g = dgl.graph([], num_nodes=5, idtype=idtype, device=F.ctx())
g = dgl.graph(([], []), num_nodes=5, idtype=idtype, device=F.ctx())
g.ndata['h'] = old_repr
g.update_all(_message, _reduce, lambda nodes : {'h' : nodes.data['h'] * 2})
new_repr = g.ndata['h']
......@@ -399,7 +386,7 @@ def test_update_all_0deg(idtype):
@parametrize_dtype
def test_pull_0deg(idtype):
g = dgl.graph([(0,1)], idtype=idtype, device=F.ctx())
g = dgl.graph(([0], [1]), idtype=idtype, device=F.ctx())
def _message(edges):
return {'m' : edges.src['h']}
def _reduce(nodes):
......@@ -465,7 +452,7 @@ def test_dynamic_addition():
@parametrize_dtype
def test_repr(idtype):
g = dgl.graph([(0,1), (0,2), (1,2)], num_nodes=10, idtype=idtype, device=F.ctx())
g = dgl.graph(([0, 0, 1], [1, 2, 2]), num_nodes=10, idtype=idtype, device=F.ctx())
repr_string = g.__repr__()
print(repr_string)
g.ndata['x'] = F.zeros((10, 5))
......@@ -475,7 +462,7 @@ def test_repr(idtype):
@parametrize_dtype
def test_local_var(idtype):
g = dgl.graph([(0,1), (1,2), (2,3), (3,4)], idtype=idtype, device=F.ctx())
g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 4]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.zeros((g.number_of_nodes(), 3))
g.edata['w'] = F.zeros((g.number_of_edges(), 4))
# test override
......@@ -512,7 +499,7 @@ def test_local_var(idtype):
assert 'ww' not in g.edata
# test initializer1
g = dgl.graph([(0,1), (1,1)], idtype=idtype, device=F.ctx())
g = dgl.graph(([0, 1], [1, 1]), idtype=idtype, device=F.ctx())
g.set_n_initializer(dgl.init.zero_initializer)
def foo(g):
g = g.local_var()
......@@ -533,7 +520,7 @@ def test_local_var(idtype):
@parametrize_dtype
def test_local_scope(idtype):
g = dgl.graph([(0,1), (1,2), (2,3), (3,4)], idtype=idtype, device=F.ctx())
g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 4]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.zeros((g.number_of_nodes(), 3))
g.edata['w'] = F.zeros((g.number_of_edges(), 4))
# test override
......@@ -584,7 +571,7 @@ def test_local_scope(idtype):
assert 'ww' not in g.edata
# test initializer1
g = dgl.graph([(0,1), (1,1)], idtype=idtype, device=F.ctx())
g = dgl.graph(([0, 1], [1, 1]), idtype=idtype, device=F.ctx())
g.set_n_initializer(dgl.init.zero_initializer)
def foo(g):
with g.local_scope():
......@@ -605,29 +592,27 @@ def test_local_scope(idtype):
@parametrize_dtype
def test_isolated_nodes(idtype):
    """Graphs must honor an explicit node count larger than the ids in the edge list."""
    # Homogeneous graph: edges touch nodes 0-2 only, but 5 nodes are requested.
    g = dgl.graph(([0, 1], [1, 2]), num_nodes=5, idtype=idtype, device=F.ctx())
    assert g.number_of_nodes() == 5

    # Heterogeneous graph: isolated nodes on both sides of the 'plays' relation
    # (users 2-4 and games 0-1, 4-6 appear in no edge).
    g = dgl.heterograph({
        ('user', 'plays', 'game'): ([0, 0, 1], [2, 3, 2])
    }, {'user': 5, 'game': 7}, idtype=idtype, device=F.ctx())
    assert g.idtype == idtype
    assert g.number_of_nodes('user') == 5
    assert g.number_of_nodes('game') == 7
@parametrize_dtype
def test_send_multigraph(idtype):
g = dgl.graph([(0,1), (0,1), (0,1), (2,1)], idtype=idtype, device=F.ctx())
g = dgl.graph(([0, 0, 0, 2], [1, 1, 1, 1]), idtype=idtype, device=F.ctx())
def _message_a(edges):
return {'a': edges.data['a']}
......
......@@ -12,7 +12,7 @@ def tree1(idtype):
3 4
Edges are from leaves to root.
"""
g = dgl.DGLGraph().astype(idtype).to(F.ctx())
g = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g.add_nodes(5)
g.add_edge(3, 1)
g.add_edge(4, 1)
......@@ -31,7 +31,7 @@ def tree2(idtype):
2 0
Edges are from leaves to root.
"""
g = dgl.DGLGraph().astype(idtype).to(F.ctx())
g = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g.add_nodes(5)
g.add_edge(2, 4)
g.add_edge(0, 4)
......@@ -120,10 +120,10 @@ def test_batch_unbatch_frame(idtype):
@parametrize_dtype
def test_batch_unbatch2(idtype):
# test setting/getting features after batch
a = dgl.DGLGraph().astype(idtype).to(F.ctx())
a = dgl.graph(([], [])).astype(idtype).to(F.ctx())
a.add_nodes(4)
a.add_edges(0, [1, 2, 3])
b = dgl.DGLGraph().astype(idtype).to(F.ctx())
b = dgl.graph(([], [])).astype(idtype).to(F.ctx())
b.add_nodes(3)
b.add_edges(0, [1, 2])
c = dgl.batch([a, b])
......@@ -179,12 +179,12 @@ def test_batch_propagate(idtype):
@parametrize_dtype
def test_batched_edge_ordering(idtype):
g1 = dgl.DGLGraph().astype(idtype).to(F.ctx())
g1 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g1.add_nodes(6)
g1.add_edges([4, 4, 2, 2, 0], [5, 3, 3, 1, 1])
e1 = F.randn((5, 10))
g1.edata['h'] = e1
g2 = dgl.DGLGraph().astype(idtype).to(F.ctx())
g2 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g2.add_nodes(6)
g2.add_edges([0, 1 ,2 ,5, 4 ,5], [1, 2, 3, 4, 3, 0])
e2 = F.randn((6, 10))
......@@ -196,13 +196,13 @@ def test_batched_edge_ordering(idtype):
@parametrize_dtype
def test_batch_no_edge(idtype):
g1 = dgl.DGLGraph().astype(idtype).to(F.ctx())
g1 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g1.add_nodes(6)
g1.add_edges([4, 4, 2, 2, 0], [5, 3, 3, 1, 1])
g2 = dgl.DGLGraph().astype(idtype).to(F.ctx())
g2 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g2.add_nodes(6)
g2.add_edges([0, 1, 2, 5, 4, 5], [1 ,2 ,3, 4, 3, 0])
g3 = dgl.DGLGraph().astype(idtype).to(F.ctx())
g3 = dgl.graph(([], [])).astype(idtype).to(F.ctx())
g3.add_nodes(1) # no edges
g = dgl.batch([g1, g3, g2]) # should not throw an error
......
......@@ -44,14 +44,14 @@ def check_equivalence_between_heterographs(g1, g2, node_attrs=None, edge_attrs=N
def test_topology(idtype):
"""Test batching two DGLHeteroGraphs where some nodes are isolated in some relations"""
g1 = dgl.heterograph({
('user', 'follows', 'user'): [(0, 1), (1, 2)],
('user', 'follows', 'developer'): [(0, 1), (1, 2)],
('user', 'plays', 'game'): [(0, 0), (1, 0), (2, 1), (3, 1)]
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'follows', 'developer'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1, 2, 3], [0, 0, 1, 1])
}, idtype=idtype, device=F.ctx())
g2 = dgl.heterograph({
('user', 'follows', 'user'): [(0, 1), (1, 2)],
('user', 'follows', 'developer'): [(0, 1), (1, 2)],
('user', 'plays', 'game'): [(0, 0), (1, 0), (2, 1)]
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'follows', 'developer'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1, 2], [0, 0, 1])
}, idtype=idtype, device=F.ctx())
bg = dgl.batch([g1, g2])
......@@ -113,17 +113,17 @@ def test_topology(idtype):
def test_batching_batched(idtype):
"""Test batching a DGLHeteroGraph and a BatchedDGLHeteroGraph."""
g1 = dgl.heterograph({
('user', 'follows', 'user'): [(0, 1), (1, 2)],
('user', 'plays', 'game'): [(0, 0), (1, 0)]
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1], [0, 0])
}, idtype=idtype, device=F.ctx())
g2 = dgl.heterograph({
('user', 'follows', 'user'): [(0, 1), (1, 2)],
('user', 'plays', 'game'): [(0, 0), (1, 0)]
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1], [0, 0])
}, idtype=idtype, device=F.ctx())
bg1 = dgl.batch([g1, g2])
g3 = dgl.heterograph({
('user', 'follows', 'user'): [(0, 1)],
('user', 'plays', 'game'): [(1, 0)]
('user', 'follows', 'user'): ([0], [1]),
('user', 'plays', 'game'): ([1], [0])
}, idtype=idtype, device=F.ctx())
bg2 = dgl.batch([bg1, g3])
assert bg2.idtype == idtype
......@@ -169,8 +169,8 @@ def test_batching_batched(idtype):
def test_features(idtype):
"""Test the features of batched DGLHeteroGraphs"""
g1 = dgl.heterograph({
('user', 'follows', 'user'): [(0, 1), (1, 2)],
('user', 'plays', 'game'): [(0, 0), (1, 0)]
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1], [0, 0])
}, idtype=idtype, device=F.ctx())
g1.nodes['user'].data['h1'] = F.tensor([[0.], [1.], [2.]])
g1.nodes['user'].data['h2'] = F.tensor([[3.], [4.], [5.]])
......@@ -181,8 +181,8 @@ def test_features(idtype):
g1.edges['plays'].data['h1'] = F.tensor([[0.], [1.]])
g2 = dgl.heterograph({
('user', 'follows', 'user'): [(0, 1), (1, 2)],
('user', 'plays', 'game'): [(0, 0), (1, 0)]
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1], [0, 0])
}, idtype=idtype, device=F.ctx())
g2.nodes['user'].data['h1'] = F.tensor([[0.], [1.], [2.]])
g2.nodes['user'].data['h2'] = F.tensor([[3.], [4.], [5.]])
......@@ -243,8 +243,8 @@ def test_features(idtype):
def test_empty_relation(idtype):
"""Test the features of batched DGLHeteroGraphs"""
g1 = dgl.heterograph({
('user', 'follows', 'user'): [(0, 1), (1, 2)],
('user', 'plays', 'game'): []
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([], [])
}, idtype=idtype, device=F.ctx())
g1.nodes['user'].data['h1'] = F.tensor([[0.], [1.], [2.]])
g1.nodes['user'].data['h2'] = F.tensor([[3.], [4.], [5.]])
......@@ -252,8 +252,8 @@ def test_empty_relation(idtype):
g1.edges['follows'].data['h2'] = F.tensor([[2.], [3.]])
g2 = dgl.heterograph({
('user', 'follows', 'user'): [(0, 1), (1, 2)],
('user', 'plays', 'game'): [(0, 0), (1, 0)]
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1], [0, 0])
}, idtype=idtype, device=F.ctx())
g2.nodes['user'].data['h1'] = F.tensor([[0.], [1.], [2.]])
g2.nodes['user'].data['h2'] = F.tensor([[3.], [4.], [5.]])
......@@ -298,8 +298,8 @@ def test_empty_relation(idtype):
edge_attrs={('user', 'follows', 'user'): ['h1']})
# Test graphs without edges
g1 = dgl.bipartite([], 'u', 'r', 'v', num_nodes=(0, 4))
g2 = dgl.bipartite([], 'u', 'r', 'v', num_nodes=(1, 5))
g1 = dgl.heterograph({('u', 'r', 'v'): ([], [])}, {'u': 0, 'v': 4})
g2 = dgl.heterograph({('u', 'r', 'v'): ([], [])}, {'u': 1, 'v': 5})
dgl.batch([g1, g2])
@parametrize_dtype
......
......@@ -223,8 +223,6 @@ def test_query():
_test(gen_from_data(elist_input(), False, False))
_test(gen_from_data(elist_input(), True, False))
_test(gen_from_data(elist_input(), True, True))
_test(gen_from_data(nx_input(), False, False))
_test(gen_from_data(nx_input(), True, False))
_test(gen_from_data(scipy_coo_input(), False, False))
_test(gen_from_data(scipy_coo_input(), True, False))
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment