Unverified Commit 7ff04152 authored by peizhou001, committed by GitHub

[Refactor] Add default ffi namespace capi (#5359)

parent 30b89e6a
"""Init all C APIs in the default namespace."""
from .function import _init_api
__all__ = _init_api("dgl.capi", __name__)
@@ -297,13 +297,14 @@ def _init_api(namespace, target_module_name=None):
    """
    target_module_name = target_module_name if target_module_name else namespace
    if namespace.startswith("dgl."):
-        _init_api_prefix(target_module_name, namespace[4:])
+        return _init_api_prefix(target_module_name, namespace[4:])
    else:
-        _init_api_prefix(target_module_name, namespace)
+        return _init_api_prefix(target_module_name, namespace)

def _init_api_prefix(module_name, prefix):
    module = sys.modules[module_name]
+    name_list = []
    for name in list_global_func_names():
        if name.startswith("_") and not name.startswith("_deprecate"):
@@ -324,6 +325,9 @@ def _init_api_prefix(module_name, prefix):
        ff.__name__ = fname
        ff.__doc__ = "DGL PackedFunc %s. " % fname
        setattr(target_module, ff.__name__, ff)
+        name_list.append(fname)
+    return name_list

def _init_internal_api():
...
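The functional change here is that ``_init_api`` (via ``_init_api_prefix``) now returns the list of function names it attaches to the target module, which is what lets the new ``dgl._ffi.capi`` package above simply write ``__all__ = _init_api("dgl.capi", __name__)``. Below is a minimal, self-contained sketch of that pattern using a toy function registry; the registry, module name, and lambda are illustrative stand-ins, not DGL's real FFI.

import sys
import types

# Toy stand-in for DGL's global C-function table.
_GLOBAL_FUNCS = {"capi._CAPI_DGLToBlock": lambda *args: ("block", args)}

def _init_api_prefix(module_name, prefix):
    module = sys.modules[module_name]
    name_list = []
    for name, func in _GLOBAL_FUNCS.items():
        if not name.startswith(prefix + "."):
            continue
        fname = name[len(prefix) + 1:]      # strip the "capi." prefix
        setattr(module, fname, func)        # expose the function on the module
        name_list.append(fname)
    return name_list                        # returned so callers can use it as __all__

# Stand-in for the new capi/__init__.py shown at the top of this commit.
capi = types.ModuleType("capi")
sys.modules["capi"] = capi
capi.__all__ = _init_api_prefix("capi", "capi")
print(capi.__all__)            # ['_CAPI_DGLToBlock']
print(capi._CAPI_DGLToBlock)   # the registered callable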
"""Transform for structures and features""" """Transform for structures and features"""
from .functional import * from .functional import *
from .module import * from .module import *
from .to_block import *
@@ -17,7 +17,6 @@
# pylint: disable= too-many-lines
import copy
-from collections import defaultdict
from collections.abc import Iterable, Mapping
import numpy as np
@@ -41,7 +40,7 @@ from .. import (
from .._ffi.function import _init_api
from ..base import dgl_warning, DGLError, EID, NID
from ..frame import Frame
-from ..heterograph import DGLBlock, DGLGraph
+from ..heterograph import DGLGraph
from ..heterograph_index import (
    create_heterograph_from_relations,
    create_metagraph_index,
@@ -71,7 +70,6 @@ __all__ = [
    "remove_self_loop",
    "metapath_reachable_graph",
    "compact_graphs",
-    "to_block",
    "to_simple",
    "to_simple_graph",
    "sort_csr_by_tag",
@@ -2304,230 +2302,6 @@ def compact_graphs(
    return new_graphs
def to_block(g, dst_nodes=None, include_dst_in_src=True, src_nodes=None):
"""Convert a graph into a bipartite-structured *block* for message passing.
A block is a graph consisting of two sets of nodes: the
*source* nodes and *destination* nodes. The source and destination nodes can have multiple
node types. All the edges connect from source nodes to destination nodes.
Specifically, the source nodes and destination nodes will have the same node types as the
ones in the original graph. DGL maps each edge ``(u, v)`` with edge type
``(utype, etype, vtype)`` in the original graph to the edge with type
``etype`` connecting from node ID ``u`` of type ``utype`` in the source side to node
ID ``v`` of type ``vtype`` in the destination side.
For blocks returned by :func:`to_block`, the destination nodes of the block will only
contain the nodes that have at least one inbound edge of any type. The source nodes
of the block will only contain the nodes that appear in the destination nodes, as well
as the nodes that have at least one outbound edge connecting to one of the destination nodes.
The destination nodes are specified by the :attr:`dst_nodes` argument if it is not None.
Parameters
----------
g : DGLGraph
The graph. Can be either on CPU or GPU.
dst_nodes : Tensor or dict[str, Tensor], optional
The list of destination nodes.
If a tensor is given, the graph must have only one node type.
If given, it must be a superset of all the nodes that have at least one inbound
edge. An error will be raised otherwise.
include_dst_in_src : bool
If False, do not include destination nodes in source nodes.
(Default: True)
src_nodes : Tensor or dict[str, Tensor], optional
The list of source nodes (prefixed by the destination nodes if
``include_dst_in_src`` is True).
If a tensor is given, the graph must have only one node type.
Returns
-------
DGLBlock
The new graph describing the block.
The node IDs induced for each type in both sides would be stored in feature
``dgl.NID``.
The edge IDs induced for each type would be stored in feature ``dgl.EID``.
Raises
------
DGLError
If :attr:`dst_nodes` is specified but it is not a superset of all the nodes that
have at least one inbound edge.
If :attr:`dst_nodes` is not None, and :attr:`g` and :attr:`dst_nodes`
are not in the same context.
Notes
-----
:func:`to_block` is most commonly used in customizing neighborhood sampling
for stochastic training on a large graph. Please refer to the user guide
:ref:`guide-minibatch` for a more thorough discussion about the methodology
of stochastic training.
See also :func:`create_block` for more flexible construction of blocks.
Examples
--------
Converting a homogeneous graph to a block as described above:
>>> g = dgl.graph(([1, 2], [2, 3]))
>>> block = dgl.to_block(g, torch.LongTensor([3, 2]))
The destination nodes would be exactly the same as the ones given: [3, 2].
>>> induced_dst = block.dstdata[dgl.NID]
>>> induced_dst
tensor([3, 2])
The first few source nodes would also be exactly the same as
the ones given. The rest are the nodes necessary for message passing
into nodes 3 and 2, which means node 1 would also be included.
>>> induced_src = block.srcdata[dgl.NID]
>>> induced_src
tensor([3, 2, 1])
Note that the first two source nodes are identical to the given nodes,
which are also the destination nodes.
The induced edges can also be obtained by the following:
>>> block.edata[dgl.EID]
tensor([1, 0])
This indicates that edges (2, 3) and (1, 2) are included in the result graph. You can
verify that the first edge in the block indeed maps to edge (2, 3), and the
second edge indeed maps to edge (1, 2):
>>> src, dst = block.edges(order='eid')
>>> induced_src[src], induced_dst[dst]
(tensor([2, 1]), tensor([3, 2]))
The destination nodes specified must be a superset of the nodes that have edges connecting
to them. For example, the following will raise an error since the destination nodes
do not contain node 3, which has an edge connecting to it.
>>> g = dgl.graph(([1, 2], [2, 3]))
>>> dgl.to_block(g, torch.LongTensor([2])) # error
Converting a heterogeneous graph to a block is similar, except that when specifying
the destination nodes, you have to give a dict:
>>> g = dgl.heterograph({('A', '_E', 'B'): ([1, 2], [2, 3])})
If you don't specify any node of type A on the destination side, the node type ``A``
in the block would have zero nodes on the destination side.
>>> block = dgl.to_block(g, {'B': torch.LongTensor([3, 2])})
>>> block.number_of_dst_nodes('A')
0
>>> block.number_of_dst_nodes('B')
2
>>> block.dstnodes['B'].data[dgl.NID]
tensor([3, 2])
The source side would contain all the nodes on the destination side:
>>> block.srcnodes['B'].data[dgl.NID]
tensor([3, 2])
As well as all the nodes that have connections to the nodes on the destination side:
>>> block.srcnodes['A'].data[dgl.NID]
tensor([2, 1])
See also
--------
create_block
"""
if dst_nodes is None:
# Find all nodes that appeared as destinations
dst_nodes = defaultdict(list)
for etype in g.canonical_etypes:
_, dst = g.edges(etype=etype)
dst_nodes[etype[2]].append(dst)
dst_nodes = {
ntype: F.unique(F.cat(values, 0))
for ntype, values in dst_nodes.items()
}
elif not isinstance(dst_nodes, Mapping):
# dst_nodes is a Tensor, check if the g has only one type.
if len(g.ntypes) > 1:
raise DGLError(
"Graph has more than one node type; please specify a dict for dst_nodes."
)
dst_nodes = {g.ntypes[0]: dst_nodes}
dst_node_ids = [
utils.toindex(dst_nodes.get(ntype, []), g._idtype_str).tousertensor(
ctx=F.to_backend_ctx(g._graph.ctx)
)
for ntype in g.ntypes
]
dst_node_ids_nd = [F.to_dgl_nd(nodes) for nodes in dst_node_ids]
for d in dst_node_ids_nd:
if g._graph.ctx != d.ctx:
raise ValueError("g and dst_nodes need to have the same context.")
src_node_ids = None
src_node_ids_nd = None
if src_nodes is not None and not isinstance(src_nodes, Mapping):
# src_nodes is a Tensor, check if the g has only one type.
if len(g.ntypes) > 1:
raise DGLError(
"Graph has more than one node type; please specify a dict for src_nodes."
)
src_nodes = {g.ntypes[0]: src_nodes}
src_node_ids = [
F.copy_to(
F.tensor(src_nodes.get(ntype, []), dtype=g.idtype),
F.to_backend_ctx(g._graph.ctx),
)
for ntype in g.ntypes
]
src_node_ids_nd = [F.to_dgl_nd(nodes) for nodes in src_node_ids]
for d in src_node_ids_nd:
if g._graph.ctx != d.ctx:
raise ValueError(
"g and src_nodes need to have the same context."
)
else:
# use an empty list to signal we need to generate it
src_node_ids_nd = []
new_graph_index, src_nodes_ids_nd, induced_edges_nd = _CAPI_DGLToBlock(
g._graph, dst_node_ids_nd, include_dst_in_src, src_node_ids_nd
)
# The new graph duplicates the original node types to SRC and DST sets.
new_ntypes = (g.ntypes, g.ntypes)
new_graph = DGLBlock(new_graph_index, new_ntypes, g.etypes)
assert new_graph.is_unibipartite # sanity check
src_node_ids = [F.from_dgl_nd(src) for src in src_nodes_ids_nd]
edge_ids = [F.from_dgl_nd(eid) for eid in induced_edges_nd]
node_frames = utils.extract_node_subframes_for_block(
g, src_node_ids, dst_node_ids
)
edge_frames = utils.extract_edge_subframes(g, edge_ids)
utils.set_new_frames(
new_graph, node_frames=node_frames, edge_frames=edge_frames
)
return new_graph
def _coalesce_edge_frame(g, edge_maps, counts, aggregator):
    r"""Coalesce edge features of duplicate edges via given aggregator in g.
...
# Copyright (c) 2023, DGL Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""To block method."""
from collections import defaultdict
from collections.abc import Mapping
from .. import backend as F, utils
from ..base import DGLError
from ..heterograph import DGLBlock
from .._ffi.capi import *
__all__ = ["to_block"]
def to_block(g, dst_nodes=None, include_dst_in_src=True, src_nodes=None):
"""Convert a graph into a bipartite-structured *block* for message passing.
A block is a graph consisting of two sets of nodes: the
*source* nodes and *destination* nodes. The source and destination nodes can have multiple
node types. All the edges connect from source nodes to destination nodes.
Specifically, the source nodes and destination nodes will have the same node types as the
ones in the original graph. DGL maps each edge ``(u, v)`` with edge type
``(utype, etype, vtype)`` in the original graph to the edge with type
``etype`` connecting from node ID ``u`` of type ``utype`` in the source side to node
ID ``v`` of type ``vtype`` in the destination side.
For blocks returned by :func:`to_block`, the destination nodes of the block will only
contain the nodes that have at least one inbound edge of any type. The source nodes
of the block will only contain the nodes that appear in the destination nodes, as well
as the nodes that have at least one outbound edge connecting to one of the destination nodes.
The destination nodes are specified by the :attr:`dst_nodes` argument if it is not None.
Parameters
----------
g : DGLGraph
The graph. Can be either on CPU or GPU.
dst_nodes : Tensor or dict[str, Tensor], optional
The list of destination nodes.
If a tensor is given, the graph must have only one node type.
If given, it must be a superset of all the nodes that have at least one inbound
edge. An error will be raised otherwise.
include_dst_in_src : bool
If False, do not include destination nodes in source nodes.
(Default: True)
src_nodes : Tensor or dict[str, Tensor], optional
The list of source nodes (prefixed by the destination nodes if
``include_dst_in_src`` is True).
If a tensor is given, the graph must have only one node type.
Returns
-------
DGLBlock
The new graph describing the block.
The node IDs induced for each type in both sides would be stored in feature
``dgl.NID``.
The edge IDs induced for each type would be stored in feature ``dgl.EID``.
Raises
------
DGLError
If :attr:`dst_nodes` is specified but it is not a superset of all the nodes that
have at least one inbound edge.
If :attr:`dst_nodes` is not None, and :attr:`g` and :attr:`dst_nodes`
are not in the same context.
Notes
-----
:func:`to_block` is most commonly used in customizing neighborhood sampling
for stochastic training on a large graph. Please refer to the user guide
:ref:`guide-minibatch` for a more thorough discussion about the methodology
of stochastic training.
See also :func:`create_block` for more flexible construction of blocks.
Examples
--------
Converting a homogeneous graph to a block as described above:
>>> g = dgl.graph(([1, 2], [2, 3]))
>>> block = dgl.to_block(g, torch.LongTensor([3, 2]))
The destination nodes would be exactly the same as the ones given: [3, 2].
>>> induced_dst = block.dstdata[dgl.NID]
>>> induced_dst
tensor([3, 2])
The first few source nodes would also be exactly the same as
the ones given. The rest are the nodes necessary for message passing
into nodes 3 and 2, which means node 1 would also be included.
>>> induced_src = block.srcdata[dgl.NID]
>>> induced_src
tensor([3, 2, 1])
Note that the first two source nodes are identical to the given nodes,
which are also the destination nodes.
The induced edges can also be obtained by the following:
>>> block.edata[dgl.EID]
tensor([1, 0])
This indicates that edges (2, 3) and (1, 2) are included in the result graph. You can
verify that the first edge in the block indeed maps to edge (2, 3), and the
second edge indeed maps to edge (1, 2):
>>> src, dst = block.edges(order='eid')
>>> induced_src[src], induced_dst[dst]
(tensor([2, 1]), tensor([3, 2]))
The destination nodes specified must be a superset of the nodes that have edges connecting
to them. For example, the following will raise an error since the destination nodes
do not contain node 3, which has an edge connecting to it.
>>> g = dgl.graph(([1, 2], [2, 3]))
>>> dgl.to_block(g, torch.LongTensor([2])) # error
Converting a heterogeneous graph to a block is similar, except that when specifying
the destination nodes, you have to give a dict:
>>> g = dgl.heterograph({('A', '_E', 'B'): ([1, 2], [2, 3])})
If you don't specify any node of type A on the destination side, the node type ``A``
in the block would have zero nodes on the destination side.
>>> block = dgl.to_block(g, {'B': torch.LongTensor([3, 2])})
>>> block.number_of_dst_nodes('A')
0
>>> block.number_of_dst_nodes('B')
2
>>> block.dstnodes['B'].data[dgl.NID]
tensor([3, 2])
The source side would contain all the nodes on the destination side:
>>> block.srcnodes['B'].data[dgl.NID]
tensor([3, 2])
As well as all the nodes that have connections to the nodes on the destination side:
>>> block.srcnodes['A'].data[dgl.NID]
tensor([2, 1])
See also
--------
create_block
"""
if dst_nodes is None:
# Find all nodes that appeared as destinations
dst_nodes = defaultdict(list)
for etype in g.canonical_etypes:
_, dst = g.edges(etype=etype)
dst_nodes[etype[2]].append(dst)
dst_nodes = {
ntype: F.unique(F.cat(values, 0))
for ntype, values in dst_nodes.items()
}
elif not isinstance(dst_nodes, Mapping):
# dst_nodes is a Tensor, check if the g has only one type.
if len(g.ntypes) > 1:
raise DGLError(
"Graph has more than one node type; please specify a dict for dst_nodes."
)
dst_nodes = {g.ntypes[0]: dst_nodes}
dst_node_ids = [
utils.toindex(dst_nodes.get(ntype, []), g._idtype_str).tousertensor(
ctx=F.to_backend_ctx(g._graph.ctx)
)
for ntype in g.ntypes
]
dst_node_ids_nd = [F.to_dgl_nd(nodes) for nodes in dst_node_ids]
for d in dst_node_ids_nd:
if g._graph.ctx != d.ctx:
raise ValueError("g and dst_nodes need to have the same context.")
src_node_ids = None
src_node_ids_nd = None
if src_nodes is not None and not isinstance(src_nodes, Mapping):
# src_nodes is a Tensor, check if the g has only one type.
if len(g.ntypes) > 1:
raise DGLError(
"Graph has more than one node type; please specify a dict for src_nodes."
)
src_nodes = {g.ntypes[0]: src_nodes}
src_node_ids = [
F.copy_to(
F.tensor(src_nodes.get(ntype, []), dtype=g.idtype),
F.to_backend_ctx(g._graph.ctx),
)
for ntype in g.ntypes
]
src_node_ids_nd = [F.to_dgl_nd(nodes) for nodes in src_node_ids]
for d in src_node_ids_nd:
if g._graph.ctx != d.ctx:
raise ValueError(
"g and src_nodes need to have the same context."
)
else:
# use an empty list to signal we need to generate it
src_node_ids_nd = []
new_graph_index, src_nodes_ids_nd, induced_edges_nd = _CAPI_DGLToBlock(
g._graph, dst_node_ids_nd, include_dst_in_src, src_node_ids_nd
)
# The new graph duplicates the original node types to SRC and DST sets.
new_ntypes = (g.ntypes, g.ntypes)
new_graph = DGLBlock(new_graph_index, new_ntypes, g.etypes)
assert new_graph.is_unibipartite # sanity check
src_node_ids = [F.from_dgl_nd(src) for src in src_nodes_ids_nd]
edge_ids = [F.from_dgl_nd(eid) for eid in induced_edges_nd]
node_frames = utils.extract_node_subframes_for_block(
g, src_node_ids, dst_node_ids
)
edge_frames = utils.extract_edge_subframes(g, edge_ids)
utils.set_new_frames(
new_graph, node_frames=node_frames, edge_frames=edge_frames
)
return new_graph
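As a quick sanity check that the relocated ``to_block`` still behaves as the docstring above describes, the homogeneous example can be run end to end (a sketch assuming a PyTorch-backed DGL build that includes this commit):

import torch
import dgl

g = dgl.graph(([1, 2], [2, 3]))
block = dgl.to_block(g, torch.LongTensor([3, 2]))

print(block.dstdata[dgl.NID])   # tensor([3, 2]) -- the given destination nodes
print(block.srcdata[dgl.NID])   # tensor([3, 2, 1]) -- plus node 1, needed for message passing

src, dst = block.edges(order="eid")
# Map block edges back to original node IDs: (tensor([2, 1]), tensor([3, 2])),
# i.e. the block contains the original edges (2, 3) and (1, 2).
print(block.srcdata[dgl.NID][src], block.dstdata[dgl.NID][dst])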
@@ -189,7 +189,7 @@ std::tuple<HeteroGraphPtr, std::vector<IdArray>> ToBlock<kDGLCUDA, int64_t>(
#endif // DGL_USE_CUDA

-DGL_REGISTER_GLOBAL("transform._CAPI_DGLToBlock")
+DGL_REGISTER_GLOBAL("capi._CAPI_DGLToBlock")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      const HeteroGraphRef graph_ref = args[0];
      const std::vector<IdArray> &rhs_nodes =
...
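With the global registration renamed from ``transform._CAPI_DGLToBlock`` to ``capi._CAPI_DGLToBlock``, the Python side now finds the function under the ``capi.`` prefix, which is exactly what ``_init_api("dgl.capi", __name__)`` scans for. A small check from an interpreter (a sketch assuming a build that includes this commit; ``list_global_func_names`` is the same helper used by ``_init_api_prefix`` above):

from dgl._ffi.function import list_global_func_names

# Collect every global C function registered under the new "capi." prefix.
capi_names = sorted(n for n in list_global_func_names() if n.startswith("capi."))
assert "capi._CAPI_DGLToBlock" in capi_names
print(len(capi_names), capi_names[:5])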
##
# Copyright 2019-2021 Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import backend as F
import dgl
import dgl.partition
from test_utils import parametrize_idtype
@parametrize_idtype
def test_to_block(idtype):
def check(g, bg, ntype, etype, dst_nodes, include_dst_in_src=True):
if dst_nodes is not None:
assert F.array_equal(bg.dstnodes[ntype].data[dgl.NID], dst_nodes)
n_dst_nodes = bg.number_of_nodes("DST/" + ntype)
if include_dst_in_src:
assert F.array_equal(
bg.srcnodes[ntype].data[dgl.NID][:n_dst_nodes],
bg.dstnodes[ntype].data[dgl.NID],
)
g = g[etype]
bg = bg[etype]
induced_src = bg.srcdata[dgl.NID]
induced_dst = bg.dstdata[dgl.NID]
induced_eid = bg.edata[dgl.EID]
bg_src, bg_dst = bg.all_edges(order="eid")
src_ans, dst_ans = g.all_edges(order="eid")
induced_src_bg = F.gather_row(induced_src, bg_src)
induced_dst_bg = F.gather_row(induced_dst, bg_dst)
induced_src_ans = F.gather_row(src_ans, induced_eid)
induced_dst_ans = F.gather_row(dst_ans, induced_eid)
assert F.array_equal(induced_src_bg, induced_src_ans)
assert F.array_equal(induced_dst_bg, induced_dst_ans)
def checkall(g, bg, dst_nodes, include_dst_in_src=True):
for etype in g.etypes:
ntype = g.to_canonical_etype(etype)[2]
if dst_nodes is not None and ntype in dst_nodes:
check(g, bg, ntype, etype, dst_nodes[ntype], include_dst_in_src)
else:
check(g, bg, ntype, etype, None, include_dst_in_src)
# homogeneous graph
g = dgl.graph(
(F.tensor([1, 2], dtype=idtype), F.tensor([2, 3], dtype=idtype))
)
dst_nodes = F.tensor([3, 2], dtype=idtype)
bg = dgl.to_block(g, dst_nodes=dst_nodes)
check(g, bg, "_N", "_E", dst_nodes)
src_nodes = bg.srcnodes["_N"].data[dgl.NID]
bg = dgl.to_block(g, dst_nodes=dst_nodes, src_nodes=src_nodes)
check(g, bg, "_N", "_E", dst_nodes)
# heterogeneous graph
g = dgl.heterograph(
{
("A", "AA", "A"): ([0, 2, 1, 3], [1, 3, 2, 4]),
("A", "AB", "B"): ([0, 1, 3, 1], [1, 3, 5, 6]),
("B", "BA", "A"): ([2, 3], [3, 2]),
},
idtype=idtype,
device=F.ctx(),
)
g.nodes["A"].data["x"] = F.randn((5, 10))
g.nodes["B"].data["x"] = F.randn((7, 5))
g.edges["AA"].data["x"] = F.randn((4, 3))
g.edges["AB"].data["x"] = F.randn((4, 3))
g.edges["BA"].data["x"] = F.randn((2, 3))
g_a = g["AA"]
def check_features(g, bg):
for ntype in bg.srctypes:
for key in g.nodes[ntype].data:
assert F.array_equal(
bg.srcnodes[ntype].data[key],
F.gather_row(
g.nodes[ntype].data[key],
bg.srcnodes[ntype].data[dgl.NID],
),
)
for ntype in bg.dsttypes:
for key in g.nodes[ntype].data:
assert F.array_equal(
bg.dstnodes[ntype].data[key],
F.gather_row(
g.nodes[ntype].data[key],
bg.dstnodes[ntype].data[dgl.NID],
),
)
for etype in bg.canonical_etypes:
for key in g.edges[etype].data:
assert F.array_equal(
bg.edges[etype].data[key],
F.gather_row(
g.edges[etype].data[key], bg.edges[etype].data[dgl.EID]
),
)
bg = dgl.to_block(g_a)
check(g_a, bg, "A", "AA", None)
check_features(g_a, bg)
assert bg.number_of_src_nodes() == 5
assert bg.number_of_dst_nodes() == 4
bg = dgl.to_block(g_a, include_dst_in_src=False)
check(g_a, bg, "A", "AA", None, False)
check_features(g_a, bg)
assert bg.number_of_src_nodes() == 4
assert bg.number_of_dst_nodes() == 4
dst_nodes = F.tensor([4, 3, 2, 1], dtype=idtype)
bg = dgl.to_block(g_a, dst_nodes)
check(g_a, bg, "A", "AA", dst_nodes)
check_features(g_a, bg)
g_ab = g["AB"]
bg = dgl.to_block(g_ab)
assert bg.idtype == idtype
assert bg.number_of_nodes("SRC/B") == 4
assert F.array_equal(
bg.srcnodes["B"].data[dgl.NID], bg.dstnodes["B"].data[dgl.NID]
)
assert bg.number_of_nodes("DST/A") == 0
checkall(g_ab, bg, None)
check_features(g_ab, bg)
dst_nodes = {"B": F.tensor([5, 6, 3, 1], dtype=idtype)}
bg = dgl.to_block(g, dst_nodes)
assert bg.number_of_nodes("SRC/B") == 4
assert F.array_equal(
bg.srcnodes["B"].data[dgl.NID], bg.dstnodes["B"].data[dgl.NID]
)
assert bg.number_of_nodes("DST/A") == 0
checkall(g, bg, dst_nodes)
check_features(g, bg)
dst_nodes = {
"A": F.tensor([4, 3, 2, 1], dtype=idtype),
"B": F.tensor([3, 5, 6, 1], dtype=idtype),
}
bg = dgl.to_block(g, dst_nodes=dst_nodes)
checkall(g, bg, dst_nodes)
check_features(g, bg)
# test specifying lhs_nodes with include_dst_in_src
src_nodes = {}
for ntype in dst_nodes.keys():
# use the previous run to get the list of source nodes
src_nodes[ntype] = bg.srcnodes[ntype].data[dgl.NID]
bg = dgl.to_block(g, dst_nodes=dst_nodes, src_nodes=src_nodes)
checkall(g, bg, dst_nodes)
check_features(g, bg)
# test without include_dst_in_src
dst_nodes = {
"A": F.tensor([4, 3, 2, 1], dtype=idtype),
"B": F.tensor([3, 5, 6, 1], dtype=idtype),
}
bg = dgl.to_block(g, dst_nodes=dst_nodes, include_dst_in_src=False)
checkall(g, bg, dst_nodes, False)
check_features(g, bg)
# test specifying lhs_nodes without include_dst_in_src
src_nodes = {}
for ntype in dst_nodes.keys():
# use the previous run to get the list of source nodes
src_nodes[ntype] = bg.srcnodes[ntype].data[dgl.NID]
bg = dgl.to_block(
g, dst_nodes=dst_nodes, include_dst_in_src=False, src_nodes=src_nodes
)
checkall(g, bg, dst_nodes, False)
check_features(g, bg)
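The test above is identical to the ``test_to_block`` removed from the old test module below; only the file changed. To run just the relocated test in isolation (a sketch; the new test file's path is not shown in this diff, so the path below is a placeholder to substitute):

import pytest

# Placeholder path -- point this at wherever the new test module lives in the repo.
pytest.main(["-q", "-k", "test_to_block", "tests/path/to/test_to_block.py"])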
@@ -1125,177 +1125,6 @@ def test_to_simple(idtype):
    assert F.array_equal(sg.edge_ids(u, v), eids)
@parametrize_idtype
def test_to_block(idtype):
def check(g, bg, ntype, etype, dst_nodes, include_dst_in_src=True):
if dst_nodes is not None:
assert F.array_equal(bg.dstnodes[ntype].data[dgl.NID], dst_nodes)
n_dst_nodes = bg.number_of_nodes("DST/" + ntype)
if include_dst_in_src:
assert F.array_equal(
bg.srcnodes[ntype].data[dgl.NID][:n_dst_nodes],
bg.dstnodes[ntype].data[dgl.NID],
)
g = g[etype]
bg = bg[etype]
induced_src = bg.srcdata[dgl.NID]
induced_dst = bg.dstdata[dgl.NID]
induced_eid = bg.edata[dgl.EID]
bg_src, bg_dst = bg.all_edges(order="eid")
src_ans, dst_ans = g.all_edges(order="eid")
induced_src_bg = F.gather_row(induced_src, bg_src)
induced_dst_bg = F.gather_row(induced_dst, bg_dst)
induced_src_ans = F.gather_row(src_ans, induced_eid)
induced_dst_ans = F.gather_row(dst_ans, induced_eid)
assert F.array_equal(induced_src_bg, induced_src_ans)
assert F.array_equal(induced_dst_bg, induced_dst_ans)
def checkall(g, bg, dst_nodes, include_dst_in_src=True):
for etype in g.etypes:
ntype = g.to_canonical_etype(etype)[2]
if dst_nodes is not None and ntype in dst_nodes:
check(g, bg, ntype, etype, dst_nodes[ntype], include_dst_in_src)
else:
check(g, bg, ntype, etype, None, include_dst_in_src)
# homogeneous graph
g = dgl.graph(
(F.tensor([1, 2], dtype=idtype), F.tensor([2, 3], dtype=idtype))
)
dst_nodes = F.tensor([3, 2], dtype=idtype)
bg = dgl.to_block(g, dst_nodes=dst_nodes)
check(g, bg, "_N", "_E", dst_nodes)
src_nodes = bg.srcnodes["_N"].data[dgl.NID]
bg = dgl.to_block(g, dst_nodes=dst_nodes, src_nodes=src_nodes)
check(g, bg, "_N", "_E", dst_nodes)
# heterogeneous graph
g = dgl.heterograph(
{
("A", "AA", "A"): ([0, 2, 1, 3], [1, 3, 2, 4]),
("A", "AB", "B"): ([0, 1, 3, 1], [1, 3, 5, 6]),
("B", "BA", "A"): ([2, 3], [3, 2]),
},
idtype=idtype,
device=F.ctx(),
)
g.nodes["A"].data["x"] = F.randn((5, 10))
g.nodes["B"].data["x"] = F.randn((7, 5))
g.edges["AA"].data["x"] = F.randn((4, 3))
g.edges["AB"].data["x"] = F.randn((4, 3))
g.edges["BA"].data["x"] = F.randn((2, 3))
g_a = g["AA"]
def check_features(g, bg):
for ntype in bg.srctypes:
for key in g.nodes[ntype].data:
assert F.array_equal(
bg.srcnodes[ntype].data[key],
F.gather_row(
g.nodes[ntype].data[key],
bg.srcnodes[ntype].data[dgl.NID],
),
)
for ntype in bg.dsttypes:
for key in g.nodes[ntype].data:
assert F.array_equal(
bg.dstnodes[ntype].data[key],
F.gather_row(
g.nodes[ntype].data[key],
bg.dstnodes[ntype].data[dgl.NID],
),
)
for etype in bg.canonical_etypes:
for key in g.edges[etype].data:
assert F.array_equal(
bg.edges[etype].data[key],
F.gather_row(
g.edges[etype].data[key], bg.edges[etype].data[dgl.EID]
),
)
bg = dgl.to_block(g_a)
check(g_a, bg, "A", "AA", None)
check_features(g_a, bg)
assert bg.number_of_src_nodes() == 5
assert bg.number_of_dst_nodes() == 4
bg = dgl.to_block(g_a, include_dst_in_src=False)
check(g_a, bg, "A", "AA", None, False)
check_features(g_a, bg)
assert bg.number_of_src_nodes() == 4
assert bg.number_of_dst_nodes() == 4
dst_nodes = F.tensor([4, 3, 2, 1], dtype=idtype)
bg = dgl.to_block(g_a, dst_nodes)
check(g_a, bg, "A", "AA", dst_nodes)
check_features(g_a, bg)
g_ab = g["AB"]
bg = dgl.to_block(g_ab)
assert bg.idtype == idtype
assert bg.number_of_nodes("SRC/B") == 4
assert F.array_equal(
bg.srcnodes["B"].data[dgl.NID], bg.dstnodes["B"].data[dgl.NID]
)
assert bg.number_of_nodes("DST/A") == 0
checkall(g_ab, bg, None)
check_features(g_ab, bg)
dst_nodes = {"B": F.tensor([5, 6, 3, 1], dtype=idtype)}
bg = dgl.to_block(g, dst_nodes)
assert bg.number_of_nodes("SRC/B") == 4
assert F.array_equal(
bg.srcnodes["B"].data[dgl.NID], bg.dstnodes["B"].data[dgl.NID]
)
assert bg.number_of_nodes("DST/A") == 0
checkall(g, bg, dst_nodes)
check_features(g, bg)
dst_nodes = {
"A": F.tensor([4, 3, 2, 1], dtype=idtype),
"B": F.tensor([3, 5, 6, 1], dtype=idtype),
}
bg = dgl.to_block(g, dst_nodes=dst_nodes)
checkall(g, bg, dst_nodes)
check_features(g, bg)
# test specifying lhs_nodes with include_dst_in_src
src_nodes = {}
for ntype in dst_nodes.keys():
# use the previous run to get the list of source nodes
src_nodes[ntype] = bg.srcnodes[ntype].data[dgl.NID]
bg = dgl.to_block(g, dst_nodes=dst_nodes, src_nodes=src_nodes)
checkall(g, bg, dst_nodes)
check_features(g, bg)
# test without include_dst_in_src
dst_nodes = {
"A": F.tensor([4, 3, 2, 1], dtype=idtype),
"B": F.tensor([3, 5, 6, 1], dtype=idtype),
}
bg = dgl.to_block(g, dst_nodes=dst_nodes, include_dst_in_src=False)
checkall(g, bg, dst_nodes, False)
check_features(g, bg)
# test specifying lhs_nodes without include_dst_in_src
src_nodes = {}
for ntype in dst_nodes.keys():
# use the previous run to get the list of source nodes
src_nodes[ntype] = bg.srcnodes[ntype].data[dgl.NID]
bg = dgl.to_block(
g, dst_nodes=dst_nodes, include_dst_in_src=False, src_nodes=src_nodes
)
checkall(g, bg, dst_nodes, False)
check_features(g, bg)
@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
@parametrize_idtype
def test_remove_edges(idtype):
...