"""Module for graph transformation utilities."""

from collections.abc import Iterable, Mapping
from collections import defaultdict

import numpy as np
from scipy import sparse

from ._ffi.function import _init_api
from .graph import DGLGraph
from .heterograph import DGLHeteroGraph
from . import ndarray as nd
from . import backend as F
from .graph_index import from_coo
from .graph_index import _get_halo_subgraph_inner_node
from .graph import unbatch
from .convert import graph, bipartite
from . import utils
from .base import DGLError, EID, NID


__all__ = [
    'line_graph',
    'khop_adj',
    'khop_graph',
    'reverse',
    'to_simple_graph',
    'to_bidirected',
    'laplacian_lambda_max',
    'knn_graph',
    'segmented_knn_graph',
    'add_self_loop',
    'remove_self_loop',
    'metapath_reachable_graph',
    'compact_graphs',
    'to_block',
    'to_simple',
    'in_subgraph',
    'out_subgraph',
    'remove_edges',
    'as_immutable_graph',
    'as_heterograph']


def pairwise_squared_distance(x):
    """Compute the pairwise squared Euclidean distances within each point set.

    x : (n_samples, n_points, dims)
    return : (n_samples, n_points, n_points)
    """
    x2s = F.sum(x * x, -1, True)
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a . b, computed batch-wise below.
    # assuming that __matmul__ is always implemented (true for PyTorch, MXNet and Chainer)
    return x2s + F.swapaxes(x2s, -1, -2) - 2 * x @ F.swapaxes(x, -1, -2)

#pylint: disable=invalid-name
def knn_graph(x, k):
    """Transforms the given point set to a directed graph, whose coordinates
    are given as a matrix. The predecessors of each point are its k-nearest
    neighbors.

    If a 3D tensor is given instead, then each row would be transformed into
    a separate graph.  The graphs will be unioned.

    Parameters
    ----------
    x : Tensor
        The input tensor.

        If 2D, each row of ``x`` corresponds to a node.

        If 3D, a k-NN graph is constructed for each 2D slice along the first
        dimension, and the graphs are then unioned.
    k : int
        The number of neighbors

    Returns
    -------
    DGLGraph
        The graph.  The node IDs are in the same order as ``x``.
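
    Examples
    --------
    A minimal usage sketch (assuming the PyTorch backend); note that a point is
    at distance zero from itself and therefore counts among its own k nearest
    neighbors:

    >>> import dgl
    >>> import torch as th
    >>> x = th.tensor([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [4.0, 4.0]])
    >>> g = dgl.knn_graph(x, 2)
    >>> g.number_of_nodes(), g.number_of_edges()
    (4, 8)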
    """
    if F.ndim(x) == 2:
        x = F.unsqueeze(x, 0)
    n_samples, n_points, _ = F.shape(x)

    dist = pairwise_squared_distance(x)
    k_indices = F.argtopk(dist, k, 2, descending=False)
    dst = F.copy_to(k_indices, F.cpu())

    src = F.zeros_like(dst) + F.reshape(F.arange(0, n_points), (1, -1, 1))

    per_sample_offset = F.reshape(F.arange(0, n_samples) * n_points, (-1, 1, 1))
    dst += per_sample_offset
    src += per_sample_offset
    dst = F.reshape(dst, (-1,))
    src = F.reshape(src, (-1,))
    adj = sparse.csr_matrix(
        (F.asnumpy(F.zeros_like(dst) + 1), (F.asnumpy(dst), F.asnumpy(src))),
        shape=(n_samples * n_points, n_samples * n_points))

    g = DGLGraph(adj, readonly=True)
    return g

#pylint: disable=invalid-name
def segmented_knn_graph(x, k, segs):
    """Transforms the given point set to a directed graph, whose coordinates
    are given as a matrix.  The predecessors of each point are its k-nearest
    neighbors.

    The matrices are concatenated along the first axis, and are segmented by
    ``segs``.  Each block would be transformed into a separate graph.  The
    graphs will be unioned.

    Parameters
    ----------
    x : Tensor
        The input tensor.
    k : int
        The number of neighbors
    segs : iterable of int
        Number of points of each point set.
        Must sum up to the number of rows in ``x``.

    Returns
    -------
    DGLGraph
        The graph.  The node IDs are in the same order as ``x``.
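
    Examples
    --------
    A minimal usage sketch (assuming the PyTorch backend), with two point sets
    of 3 and 4 points stacked in ``x``; each point counts itself among its k
    nearest neighbors:

    >>> import dgl
    >>> import torch as th
    >>> x = th.rand(7, 2)
    >>> g = dgl.segmented_knn_graph(x, 2, [3, 4])
    >>> g.number_of_nodes(), g.number_of_edges()
    (7, 14)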
    """
    n_total_points, _ = F.shape(x)
    offset = np.insert(np.cumsum(segs), 0, 0)

    h_list = F.split(x, segs, 0)
    dst = [
        F.argtopk(pairwise_squared_distance(h_g), k, 1, descending=False) +
        int(offset[i])
        for i, h_g in enumerate(h_list)]
    dst = F.cat(dst, 0)
    src = F.arange(0, n_total_points).unsqueeze(1).expand(n_total_points, k)

    dst = F.reshape(dst, (-1,))
    src = F.reshape(src, (-1,))
    adj = sparse.csr_matrix((F.asnumpy(F.zeros_like(dst) + 1), (F.asnumpy(dst), F.asnumpy(src))))

    g = DGLGraph(adj, readonly=True)
    return g

def line_graph(g, backtracking=True, shared=False):
    """Return the line graph of this graph.

    Parameters
    ----------
    g : dgl.DGLGraph
        The input graph.
    backtracking : bool, optional
        Whether the line graph includes backtracking edges, i.e. whether the edge
        ``(u, v)`` is connected to the reverse edge ``(v, u)`` in the line graph.
        Default: True.
    shared : bool, optional
        Whether the returned line graph shares representations with ``g``.

    Returns
    -------
    DGLGraph
        The line graph of this graph.
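
    Examples
    --------
    A minimal sketch on a directed triangle; each edge of ``g`` becomes a node
    of the line graph:

    >>> import dgl
    >>> g = dgl.DGLGraph()
    >>> g.add_nodes(3)
    >>> g.add_edges([0, 1, 2], [1, 2, 0])
    >>> lg = dgl.line_graph(g)
    >>> lg.number_of_nodes(), lg.number_of_edges()
    (3, 3)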
    """
    graph_data = g._graph.line_graph(backtracking)
    node_frame = g._edge_frame if shared else None
    return DGLGraph(graph_data, node_frame)

def khop_adj(g, k):
    """Return the matrix of :math:`A^k` where :math:`A` is the adjacency matrix of :math:`g`,
    where a row represents the destination and a column represents the source.

    Parameters
    ----------
    g : dgl.DGLGraph
        The input graph.
    k : int
        The :math:`k` in :math:`A^k`.

    Returns
    -------
    tensor
        The returned tensor, dtype is ``np.float32``.

    Examples
    --------

    >>> import dgl
    >>> g = dgl.DGLGraph()
    >>> g.add_nodes(5)
    >>> g.add_edges([0,1,2,3,4,0,1,2,3,4], [0,1,2,3,4,1,2,3,4,0])
    >>> dgl.khop_adj(g, 1)
    tensor([[1., 0., 0., 0., 1.],
            [1., 1., 0., 0., 0.],
            [0., 1., 1., 0., 0.],
            [0., 0., 1., 1., 0.],
            [0., 0., 0., 1., 1.]])
    >>> dgl.khop_adj(g, 3)
    tensor([[1., 0., 1., 3., 3.],
            [3., 1., 0., 1., 3.],
            [3., 3., 1., 0., 1.],
            [1., 3., 3., 1., 0.],
            [0., 1., 3., 3., 1.]])
    """
    adj_k = g.adjacency_matrix_scipy(return_edge_ids=False) ** k
    return F.tensor(adj_k.todense().astype(np.float32))

def khop_graph(g, k):
    """Return the graph that includes all :math:`k`-hop neighbors of the given graph as edges.
    The adjacency matrix of the returned graph is :math:`A^k`
    (where :math:`A` is the adjacency matrix of :math:`g`).

    Parameters
    ----------
    g : dgl.DGLGraph
        The input graph.
    k : int
        The :math:`k` in `k`-hop graph.

    Returns
    -------
    dgl.DGLGraph
        The returned ``DGLGraph``.

    Examples
    --------

    Below is a simple example:

    >>> import dgl
    >>> g = dgl.DGLGraph()
    >>> g.add_nodes(3)
    >>> g.add_edges([0, 1], [1, 2])
    >>> g_2 = dgl.transform.khop_graph(g, 2)
    >>> print(g_2.edges())
    (tensor([0]), tensor([2]))

    A more complicated example:

    >>> import dgl
    >>> g = dgl.DGLGraph()
    >>> g.add_nodes(5)
    >>> g.add_edges([0,1,2,3,4,0,1,2,3,4], [0,1,2,3,4,1,2,3,4,0])
    >>> dgl.khop_graph(g, 1)
    DGLGraph(num_nodes=5, num_edges=10,
             ndata_schemes={}
             edata_schemes={})
    >>> dgl.khop_graph(g, 3)
    DGLGraph(num_nodes=5, num_edges=40,
             ndata_schemes={}
             edata_schemes={})
    """
    n = g.number_of_nodes()
    adj_k = g.adjacency_matrix_scipy(transpose=True, return_edge_ids=False) ** k
    adj_k = adj_k.tocoo()
    multiplicity = adj_k.data
    row = np.repeat(adj_k.row, multiplicity)
    col = np.repeat(adj_k.col, multiplicity)
    # TODO(zihao): we should support creating multi-graph from scipy sparse matrix
    # in the future.
    return DGLGraph(from_coo(n, row, col, True))

def reverse(g, share_ndata=False, share_edata=False):
    """Return the reverse of a graph

    The reverse (also called converse, transpose) of a directed graph is another directed
    graph on the same nodes with edges reversed in terms of direction.

    Given a :class:`DGLGraph` object, we return another :class:`DGLGraph` object
    representing its reverse.

    Notes
    -----
    * We do not dynamically update the topology of a graph once that of its reverse changes.
      This can be particularly problematic when the node/edge attrs are shared. For example,
      if the topology of both the original graph and its reverse get changed independently,
      you can end up with mismatched node/edge features.

    Parameters
    ----------
    g : dgl.DGLGraph
        The input graph.
    share_ndata: bool, optional
        If True, the original graph and the reversed graph share memory for node attributes.
        Otherwise the reversed graph will not be initialized with node attributes.
    share_edata: bool, optional
        If True, the original graph and the reversed graph share memory for edge attributes.
        Otherwise the reversed graph will not have edge attributes.

    Examples
    --------
    Create a graph to reverse.

    >>> import dgl
    >>> import torch as th
    >>> g = dgl.DGLGraph()
    >>> g.add_nodes(3)
    >>> g.add_edges([0, 1, 2], [1, 2, 0])
    >>> g.ndata['h'] = th.tensor([[0.], [1.], [2.]])
    >>> g.edata['h'] = th.tensor([[3.], [4.], [5.]])

    Reverse the graph and examine its structure.

    >>> rg = g.reverse(share_ndata=True, share_edata=True)
    >>> print(rg)
    DGLGraph with 3 nodes and 3 edges.
    Node data: {'h': Scheme(shape=(1,), dtype=torch.float32)}
    Edge data: {'h': Scheme(shape=(1,), dtype=torch.float32)}

    The edges are reversed now.

    >>> rg.has_edges_between([1, 2, 0], [0, 1, 2])
    tensor([1, 1, 1])

    Reversed edges have the same feature as the original ones.

    >>> g.edges[[0, 2], [1, 0]].data['h'] == rg.edges[[1, 0], [0, 2]].data['h']
    tensor([[1],
            [1]], dtype=torch.uint8)

    The node/edge features of the reversed graph share memory with the original
    graph, which is helpful for both forward computation and back propagation.

    >>> g.ndata['h'] = g.ndata['h'] + 1
    >>> rg.ndata['h']
    tensor([[1.],
            [2.],
            [3.]])
    """
    g_reversed = DGLGraph()
    g_reversed.add_nodes(g.number_of_nodes())
    g_edges = g.all_edges(order='eid')
    g_reversed.add_edges(g_edges[1], g_edges[0])
    g_reversed._batch_num_nodes = g._batch_num_nodes
    g_reversed._batch_num_edges = g._batch_num_edges
    if share_ndata:
        g_reversed._node_frame = g._node_frame
    if share_edata:
        g_reversed._edge_frame = g._edge_frame
    return g_reversed

def to_simple_graph(g):
    """Convert the graph to a simple graph with no multi-edge.

    The function generates a new *readonly* graph with no node/edge feature.

    Parameters
    ----------
    g : DGLGraph
        The input graph.

    Returns
    -------
    DGLGraph
        A simple graph.
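
    Examples
    --------
    A minimal sketch, collapsing two parallel edges into one (the ``multigraph``
    flag may be unnecessary or deprecated depending on the DGL version):

    >>> g = dgl.DGLGraph([(0, 1), (0, 1), (1, 2)], multigraph=True, readonly=True)
    >>> sg = dgl.to_simple_graph(g)
    >>> sg.number_of_edges()
    2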
    """
    gidx = _CAPI_DGLToSimpleGraph(g._graph)
    return DGLGraph(gidx, readonly=True)

def to_bidirected(g, readonly=True):
    """Convert the graph to a bidirected graph.

    The function generates a new graph with no node/edge feature.
    If g has an edge for i->j but no edge for j->i, then the
    returned graph will have both i->j and j->i.

    If the input graph is a multigraph (there are multiple edges from node i to node j),
    the returned graph isn't well defined.

    Parameters
    ----------
    g : DGLGraph
        The input graph.
    readonly : bool, default True
        Whether the returned bidirected graph is readonly or not.

    Notes
    -----
    Please make sure g is a simple graph, otherwise the return value is undefined.

    Returns
    -------
    DGLGraph

    Examples
    --------
    The following example uses the PyTorch backend.

    >>> g = dgl.DGLGraph()
    >>> g.add_nodes(2)
    >>> g.add_edges([0, 0], [0, 1])
    >>> bg1 = dgl.to_bidirected(g)
    >>> bg1.edges()
    (tensor([0, 1, 0]), tensor([0, 0, 1]))
    """
    if readonly:
        newgidx = _CAPI_DGLToBidirectedImmutableGraph(g._graph)
    else:
        newgidx = _CAPI_DGLToBidirectedMutableGraph(g._graph)
    return DGLGraph(newgidx)

def laplacian_lambda_max(g):
    """Return the largest eigenvalue of the normalized symmetric laplacian of g.

    The largest eigenvalue of the normalized symmetric Laplacian of any graph is less than
    or equal to 2, ref: https://en.wikipedia.org/wiki/Laplacian_matrix#Properties

    Parameters
    ----------
    g : DGLGraph
        The input graph. It should be an undirected graph, and it can be a batched graph.

    Returns
    -------
    list :
        Return a list, where the i-th item indicates the largest eigenvalue
        of the i-th graph in ``g``.

    Examples
    --------

    >>> import dgl
    >>> g = dgl.DGLGraph()
    >>> g.add_nodes(5)
    >>> g.add_edges([0, 1, 2, 3, 4, 0, 1, 2, 3, 4], [1, 2, 3, 4, 0, 4, 0, 1, 2, 3])
    >>> dgl.laplacian_lambda_max(g)
    [1.809016994374948]
    """
    g_arr = unbatch(g)
    rst = []
    for g_i in g_arr:
        n = g_i.number_of_nodes()
        adj = g_i.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
        norm = sparse.diags(F.asnumpy(g_i.in_degrees()).clip(1) ** -0.5, dtype=float)
        laplacian = sparse.eye(n) - norm * adj * norm
        rst.append(sparse.linalg.eigs(laplacian, 1, which='LM',
                                      return_eigenvectors=False)[0].real)
    return rst

def metapath_reachable_graph(g, metapath):
    """Return a graph where the successors of any node ``u`` are nodes reachable from ``u`` by
    the given metapath.

    If the beginning node type ``s`` and ending node type ``t`` are the same, it will return
    a homogeneous graph with node type ``s = t``.  Otherwise, a unidirectional bipartite graph
    with source node type ``s`` and destination node type ``t`` is returned.

    In both cases, two nodes ``u`` and ``v`` will be connected with an edge ``(u, v)`` if
    there exists one path matching the metapath from ``u`` to ``v``.

    The result graph keeps the node set of type ``s`` and ``t`` in the original graph even if
    they might have no neighbor.

    The features of the source/destination node type in the original graph would be copied to
    the new graph.

    Parameters
    ----------
    g : DGLHeteroGraph
        The input graph
    metapath : list[str or tuple of str]
        Metapath in the form of a list of edge types

    Returns
    -------
    DGLHeteroGraph
        A homogeneous or bipartite graph.
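
    Examples
    --------
    A minimal sketch (assuming the PyTorch backend); the edge ordering of the
    result may differ:

    >>> g = dgl.heterograph({
    ...     ('user', 'follows', 'user'): [(0, 1), (1, 2)],
    ...     ('user', 'plays', 'game'): [(1, 0), (2, 1)]})
    >>> new_g = dgl.metapath_reachable_graph(g, ['follows', 'plays'])
    >>> new_g.edges()
    (tensor([0, 1]), tensor([0, 1]))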
    """
    adj = 1
    index_dtype = g._idtype_str
    for etype in metapath:
        adj = adj * g.adj(etype=etype, scipy_fmt='csr', transpose=True)

    adj = (adj != 0).tocsr()
    srctype = g.to_canonical_etype(metapath[0])[0]
    dsttype = g.to_canonical_etype(metapath[-1])[2]
    if srctype == dsttype:
        assert adj.shape[0] == adj.shape[1]
        new_g = graph(adj, ntype=srctype, index_dtype=index_dtype)
    else:
        new_g = bipartite(adj, utype=srctype, vtype=dsttype, index_dtype=index_dtype)

    for key, value in g.nodes[srctype].data.items():
        new_g.nodes[srctype].data[key] = value
    if srctype != dsttype:
        for key, value in g.nodes[dsttype].data.items():
            new_g.nodes[dsttype].data[key] = value

    return new_g

def add_self_loop(g):
    """Return a new graph containing all the edges in the input graph plus self loops
    of every nodes.
    No duplicate self loop will be added for nodes already having self loops.
492
493
494
495
496
497
498
499
    Self-loop edges id are not preserved. All self-loop edges would be added at the end.

    Examples
    ---------

    >>> g = DGLGraph()
    >>> g.add_nodes(5)
    >>> g.add_edges([0, 1, 2], [1, 1, 2])
    >>> new_g = dgl.transform.add_self_loop(g) # Nodes 0, 3, 4 don't have self-loop
    >>> new_g.edges()
    (tensor([0, 0, 1, 2, 3, 4]), tensor([1, 0, 1, 2, 3, 4]))

    Parameters
    ------------
    g: DGLGraph

    Returns
    --------
    DGLGraph
    """
    new_g = DGLGraph()
    new_g.add_nodes(g.number_of_nodes())
    src, dst = g.all_edges(order="eid")
    src = F.zerocopy_to_numpy(src)
    dst = F.zerocopy_to_numpy(dst)
    non_self_edges_idx = src != dst
    nodes = np.arange(g.number_of_nodes())
    new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
    new_g.add_edges(nodes, nodes)
    return new_g

def remove_self_loop(g):
    """Return a new graph with all self-loop edges removed

    Examples
    ---------

    >>> g = DGLGraph()
    >>> g.add_nodes(5)
    >>> g.add_edges([0, 1, 2], [1, 1, 2])
    >>> new_g = dgl.transform.remove_self_loop(g) # Nodes 1, 2 have self-loop
    >>> new_g.edges()
    (tensor([0]), tensor([1]))

    Parameters
    ------------
    g: DGLGraph

    Returns
    --------
    DGLGraph
    """
    new_g = DGLGraph()
    new_g.add_nodes(g.number_of_nodes())
    src, dst = g.all_edges(order="eid")
    src = F.zerocopy_to_numpy(src)
    dst = F.zerocopy_to_numpy(dst)
    non_self_edges_idx = src != dst
    new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
    return new_g

def reorder_nodes(g, new_node_ids):
    """ Generate a new graph with new node Ids.

    We assign each node in the input graph with a new node Id. This results in
    a new graph.

    Parameters
    ----------
    g : DGLGraph
        The input graph
    new_node_ids : a tensor
        The new node Ids
    Returns
    -------
    DGLGraph
        The graph with new node Ids.
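
    Examples
    --------
    A minimal sketch (assuming the PyTorch backend and a readonly input graph);
    node ``i`` of ``g`` receives the new ID ``new_node_ids[i]``, and the original
    IDs are kept in ``new_g.ndata['orig_id']``:

    >>> g = dgl.DGLGraph([(0, 1), (1, 2)], readonly=True)
    >>> new_g = dgl.transform.reorder_nodes(g, [2, 1, 0])
    >>> new_g.ndata['orig_id']
    tensor([2, 1, 0])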
    """
    assert len(new_node_ids) == g.number_of_nodes(), \
            "The number of new node ids must match #nodes in the graph."
    new_node_ids = utils.toindex(new_node_ids)
    sorted_ids, idx = F.sort_1d(new_node_ids.tousertensor())
    assert F.asnumpy(sorted_ids[0]) == 0 \
            and F.asnumpy(sorted_ids[-1]) == g.number_of_nodes() - 1, \
            "The new node Ids are incorrect."
    new_gidx = _CAPI_DGLReorderGraph(g._graph, new_node_ids.todgltensor())
    new_g = DGLGraph(new_gidx)
    new_g.ndata['orig_id'] = idx
    return new_g

def partition_graph_with_halo(g, node_part, extra_cached_hops, reshuffle=False):
    '''Partition a graph.

    Based on the given node assignments for each partition, the function splits
    the input graph into subgraphs. A subgraph may contain HALO nodes, which do
    not belong to the partition of the subgraph but are connected to the nodes
    in the partition within a fixed number of hops.

    If `reshuffle` is turned on, the function reshuffles node Ids and edge Ids
    of the input graph before partitioning. After reshuffling, all nodes and edges
    in a partition fall in a contiguous Id range in the input graph.
    The partitioned subgraphs have node data 'orig_id', which stores the node Ids
    in the original input graph.

    Parameters
    ------------
    g: DGLGraph
        The graph to be partitioned
    node_part: 1D tensor
        Specify which partition a node is assigned to. The length of this tensor
        needs to be the same as the number of nodes of the graph. Each element
        indicates the partition Id of a node.
    extra_cached_hops: int
        The number of hops within which a HALO node can be accessed.
    reshuffle : bool
        Reshuffle nodes so that nodes in the same partition are in the same Id range.

    Returns
    --------
    a dict of DGLGraphs
        The key is the partition Id and the value is the DGLGraph of the partition.
    '''
    assert len(node_part) == g.number_of_nodes()
    node_part = utils.toindex(node_part)
    if reshuffle:
        node_part = node_part.tousertensor()
        sorted_part, new2old_map = F.sort_1d(node_part)
        new_node_ids = np.zeros((g.number_of_nodes(),), dtype=np.int64)
        new_node_ids[F.asnumpy(new2old_map)] = np.arange(0, g.number_of_nodes())
        g = reorder_nodes(g, new_node_ids)
        node_part = utils.toindex(sorted_part)
        # We reassign edges in in-CSR. In this way, after partitioning, we can ensure
        # that all edges in a partition are in the contiguous Id space.
        orig_eids = _CAPI_DGLReassignEdges(g._graph, True)
        orig_eids = utils.toindex(orig_eids)
        g.edata['orig_id'] = orig_eids.tousertensor()

    subgs = _CAPI_DGLPartitionWithHalo(g._graph, node_part.todgltensor(), extra_cached_hops)
    subg_dict = {}
    node_part = node_part.tousertensor()
    for i, subg in enumerate(subgs):
        inner_node = _get_halo_subgraph_inner_node(subg)
        subg = g._create_subgraph(subg, subg.induced_nodes, subg.induced_edges)
        inner_node = F.zerocopy_from_dlpack(inner_node.to_dlpack())
        subg.ndata['inner_node'] = inner_node
        subg.ndata['part_id'] = F.gather_row(node_part, subg.parent_nid)
        if reshuffle:
            subg.ndata['orig_id'] = F.gather_row(g.ndata['orig_id'], subg.ndata[NID])
            subg.edata['orig_id'] = F.gather_row(g.edata['orig_id'], subg.edata[EID])

        if extra_cached_hops >= 1:
            inner_edge = F.zeros((subg.number_of_edges(),), F.int64, F.cpu())
            inner_nids = F.nonzero_1d(subg.ndata['inner_node'])
            # TODO(zhengda) we need to fix utils.toindex() to avoid the dtype cast below.
            inner_nids = F.astype(inner_nids, F.int64)
            inner_eids = subg.in_edges(inner_nids, form='eid')
            inner_edge = F.scatter_row(inner_edge, inner_eids,
                                       F.ones((len(inner_eids),), F.dtype(inner_edge), F.cpu()))
        else:
            inner_edge = F.ones((subg.number_of_edges(),), F.int64, F.cpu())
        subg.edata['inner_edge'] = inner_edge
        subg_dict[i] = subg
    return subg_dict

def metis_partition_assignment(g, k, balance_ntypes=None, balance_edges=False):
    ''' This assigns nodes to different partitions with the METIS partitioning algorithm.

    When performing Metis partitioning, we can put some constraints on the partitioning.
    Currently, it supports two constraints to balance the partitioning. By default, Metis
    always tries to balance the number of nodes in each partition.

    * `balance_ntypes` balances the number of nodes of different types in each partition.
    * `balance_edges` balances the number of edges in each partition.

    To balance the node types, a user needs to pass a vector of N elements to indicate
    the type of each node. N is the number of nodes in the input graph.


    After the partition assignment, we construct partitions.

    Parameters
    ----------
    g : DGLGraph
        The graph to be partitioned
    k : int
        The number of partitions.
    balance_ntypes : tensor
        Node type of each node
    balance_edges : bool
        Indicate whether to balance the edges.

    Returns
    -------
    a 1-D tensor
        A vector with each element that indicates the partition Id of a vertex.
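
    Examples
    --------
    A usage sketch (assuming DGL is built with METIS support; the concrete
    assignment depends on METIS):

    >>> g = dgl.DGLGraph([(0, 1), (1, 2), (2, 3), (3, 0)], readonly=True)
    >>> node_part = dgl.transform.metis_partition_assignment(g, 2)
    >>> # node_part[i] is the partition Id of node i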
    '''
    # METIS works only on symmetric graphs.
    # The METIS runs on the symmetric graph to generate the node assignment to partitions.
    sym_g = to_bidirected(g, readonly=True)
    vwgt = []
    # To balance the node types in each partition, we can take advantage of the vertex weights
    # in Metis. When vertex weights are provided, Metis will try to generate partitions with
    # balanced vertex weights. A vertex can be assigned with multiple weights. The vertex weights
    # are stored in a vector of N * w elements, where N is the number of vertices and w
    # is the number of weights per vertex. Metis tries to balance the first weight, and then
    # the second weight, and so on.
    # When balancing node types, we use the first weight to indicate the first node type.
    # if a node belongs to the first node type, its weight is set to 1; otherwise, 0.
    # Similarly, we set the second weight for the second node type and so on. The number
    # of weights is the same as the number of node types.
    if balance_ntypes is not None:
        assert len(balance_ntypes) == g.number_of_nodes(), \
                "The length of balance_ntypes should be equal to #nodes in the graph"
        balance_ntypes = utils.toindex(balance_ntypes)
        balance_ntypes = balance_ntypes.tousertensor()
        uniq_ntypes = F.unique(balance_ntypes)
        for ntype in uniq_ntypes:
            vwgt.append(F.astype(balance_ntypes == ntype, F.int64))

    # When balancing edges in partitions, we use in-degree as one of the weights.
    if balance_edges:
        vwgt.append(F.astype(g.in_degrees(), F.int64))

    # The vertex weights have to be stored in a vector.
    if len(vwgt) > 0:
        vwgt = F.stack(vwgt, 1)
        shape = (np.prod(F.shape(vwgt),),)
        vwgt = F.reshape(vwgt, shape)
        vwgt = F.zerocopy_to_dgl_ndarray(vwgt)
    else:
        vwgt = F.zeros((0,), F.int64, F.cpu())
        vwgt = F.zerocopy_to_dgl_ndarray(vwgt)

    node_part = _CAPI_DGLMetisPartition(sym_g._graph, k, vwgt)
    if len(node_part) == 0:
        return None
    else:
        node_part = utils.toindex(node_part)
        return node_part.tousertensor()

def metis_partition(g, k, extra_cached_hops=0, reshuffle=False,
                    balance_ntypes=None, balance_edges=False):
    ''' This partitions a graph with the METIS partitioning algorithm.

    Metis assigns vertices to partitions. This API constructs subgraphs with the vertices
    assigned to the partitions and their incoming edges. A subgraph may contain HALO nodes,
    which do not belong to the partition of the subgraph but are connected to the nodes
    in the partition within a fixed number of hops.

    When performing Metis partitioning, we can put some constraints on the partitioning.
    Currently, it supports two constraints to balance the partitioning. By default, Metis
    always tries to balance the number of nodes in each partition.

    * `balance_ntypes` balances the number of nodes of different types in each partition.
    * `balance_edges` balances the number of edges in each partition.

    To balance the node types, a user needs to pass a vector of N elements to indicate
    the type of each node. N is the number of nodes in the input graph.

    If `reshuffle` is turned on, the function reshuffles node Ids and edge Ids
    of the input graph before partitioning. After reshuffling, all nodes and edges
    in a partition fall in a contiguous Id range in the input graph.
    The partitioned subgraphs have node data 'orig_id', which stores the node Ids
    in the original input graph.

    Each partitioned subgraph is stored as a DGLGraph with `part_id` node data that
    indicates the partition a node belongs to. The subgraphs do not contain
    the node/edge data of the input graph.

    Parameters
    ------------
    g: DGLGraph
        The graph to be partitioned
    k: int
        The number of partitions.
    extra_cached_hops: int
        The number of hops within which a HALO node can be accessed.
    reshuffle : bool
        Reshuffle nodes so that nodes in the same partition are in the same Id range.
    balance_ntypes : tensor
        Node type of each node
    balance_edges : bool
        Indicate whether to balance the edges.

    Returns
    --------
    a dict of DGLGraphs
        The key is the partition Id and the value is the DGLGraph of the partition.
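
    Examples
    --------
    A usage sketch (assuming DGL is built with METIS support):

    >>> g = dgl.DGLGraph([(0, 1), (1, 2), (2, 3), (3, 0)], readonly=True)
    >>> parts = dgl.transform.metis_partition(g, 2)
    >>> # parts maps each partition Id to the DGLGraph of that partition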
    '''
    node_part = metis_partition_assignment(g, k, balance_ntypes, balance_edges)
    if node_part is None:
        return None

    # Then we split the original graph into parts based on the METIS partitioning results.
    return partition_graph_with_halo(g, node_part, extra_cached_hops, reshuffle)

def compact_graphs(graphs, always_preserve=None):
    """Given a list of graphs with the same set of nodes, find and eliminate the common
    isolated nodes across all graphs.

    This function requires the graphs to have the same set of nodes (i.e. the node types
    must be the same, and the number of nodes of each node type must be the same).  The
    metagraph does not have to be the same.

    It finds all the nodes that have zero in-degree and zero out-degree in all the given
    graphs, and eliminates them from all the graphs.

    Useful for graph sampling where we have a giant graph but we only wish to perform
    message passing on a smaller graph with a (tiny) subset of nodes.

    The node and edge features are not preserved.

    Parameters
    ----------
    graphs : DGLHeteroGraph or list[DGLHeteroGraph]
        The graph, or list of graphs
    always_preserve : Tensor or dict[str, Tensor], optional
        If a dict of node types and node ID tensors is given, the nodes of given
        node types would not be removed, regardless of whether they are isolated.
        If a Tensor is given, assume that all the graphs have one (same) node type.

    Returns
    -------
    DGLHeteroGraph or list[DGLHeteroGraph]
        The compacted graph or list of compacted graphs.

        Each returned graph would have a feature ``dgl.NID`` containing the mapping
        of node IDs for each type from the compacted graph(s) to the original graph(s).
        Note that the mapping is the same for all the compacted graphs.

    Bugs
    ----
    This function currently requires that the same node type of all graphs should have
    the same node type ID, i.e. the node types are *ordered* the same.

    Examples
    --------
    The following code constructs a bipartite graph with 20 users and 10 games, but
    only user #1 and #3, as well as game #3 and #5, have connections:

    >>> g = dgl.bipartite([(1, 3), (3, 5)], 'user', 'plays', 'game', num_nodes=(20, 10))

    The following would compact the graph above to another bipartite graph with only
    two users and two games.

    >>> new_g, induced_nodes = dgl.compact_graphs(g)
    >>> induced_nodes
    {'user': tensor([1, 3]), 'game': tensor([3, 5])}

    The mapping tells us that only user #1 and #3 as well as game #3 and #5 are kept.
    Furthermore, the first user and second user in the compacted graph maps to
    user #1 and #3 in the original graph.  Games are similar.

    One can verify that the edge connections are kept the same in the compacted graph.

    >>> new_g.edges(form='all', order='eid', etype='plays')
    (tensor([0, 1]), tensor([0, 1]), tensor([0, 1]))

    When compacting multiple graphs, nodes that do not have any connections in any
    of the given graphs are removed.  So if we compact ``g`` and the following ``g2``
    graphs together:

    >>> g2 = dgl.bipartite([(1, 6), (6, 8)], 'user', 'plays', 'game', num_nodes=(20, 10))
    >>> (new_g, new_g2), induced_nodes = dgl.compact_graphs([g, g2])
    >>> induced_nodes
    {'user': tensor([1, 3, 6]), 'game': tensor([3, 5, 6, 8])}

    Then one can see that user #1 from both graphs, users #3 from the first graph, as
    well as user #6 from the second graph, are kept.  Games are similar.

    Similarly, one can also verify the connections:

    >>> new_g.edges(form='all', order='eid', etype='plays')
    (tensor([0, 1]), tensor([0, 1]), tensor([0, 1]))
    >>> new_g2.edges(form='all', order='eid', etype='plays')
    (tensor([0, 2]), tensor([2, 3]), tensor([0, 1]))
    """
    return_single = False
    if not isinstance(graphs, Iterable):
        graphs = [graphs]
        return_single = True
    if len(graphs) == 0:
        return []

    # Ensure the node types are ordered the same.
    # TODO(BarclayII): we ideally need to remove this constraint.
    ntypes = graphs[0].ntypes
    graph_dtype = graphs[0]._idtype_str
    graph_ctx = graphs[0]._graph.ctx
    for g in graphs:
        assert ntypes == g.ntypes, \
            ("All graphs should have the same node types in the same order, got %s and %s" %
             (ntypes, g.ntypes))
        assert graph_dtype == g._idtype_str, "Expect graph data type to be {}, but got {}".format(
            graph_dtype, g._idtype_str)
        assert graph_ctx == g._graph.ctx, "Expect graph device to be {}, but got {}".format(
            graph_ctx, g._graph.ctx)

    # Process the dictionary or tensor of "always preserve" nodes
    if always_preserve is None:
        always_preserve = {}
    elif not isinstance(always_preserve, Mapping):
        if len(ntypes) > 1:
            raise ValueError("Node type must be given if multiple node types exist.")
        always_preserve = {ntypes[0]: always_preserve}

    always_preserve_nd = []
    for ntype in ntypes:
        nodes = always_preserve.get(ntype, None)
        if nodes is None:
            nodes = nd.empty([0], graph_dtype, graph_ctx)
        else:
            nodes = F.zerocopy_to_dgl_ndarray(nodes)
        always_preserve_nd.append(nodes)

    # Compact and construct heterographs
    new_graph_indexes, induced_nodes = _CAPI_DGLCompactGraphs(
        [g._graph for g in graphs], always_preserve_nd)
    induced_nodes = [F.zerocopy_from_dgl_ndarray(nodes) for nodes in induced_nodes]

    new_graphs = [
        DGLHeteroGraph(new_graph_index, graph.ntypes, graph.etypes)
        for new_graph_index, graph in zip(new_graph_indexes, graphs)]
    for g in new_graphs:
        for i, ntype in enumerate(graphs[0].ntypes):
            g.nodes[ntype].data[NID] = induced_nodes[i]
    if return_single:
        new_graphs = new_graphs[0]

    return new_graphs

def to_block(g, dst_nodes=None, include_dst_in_src=True):
    """Convert a graph into a bipartite-structured "block" for message passing.

    A block graph is a uni-directional bipartite graph consisting of two sets of nodes,
    SRC and DST. Each set can have many node types, while all the edges go from SRC
    nodes to DST nodes.

    Specifically, for each relation graph of canonical edge type ``(utype, etype, vtype)``,
    node type ``utype`` belongs to SRC while ``vtype`` belongs to DST.
    Edges from node type ``utype`` to node type ``vtype`` are preserved. If
    ``utype == vtype``, the result graph will have two node types of the same name ``utype``,
    but one belongs to SRC while the other belongs to DST. This is because although
    they have the same name, their node ids are relabeled differently (see below). In
    both cases, the canonical edge type in the new graph is still
    ``(utype, etype, vtype)``, so there is no difference when referring to it.

    Moreover, the function also relabels node ids in each type to make the graph more compact.
    Specifically, the nodes of type ``vtype`` would contain the nodes that have at least one
    inbound edge of any type, while ``utype`` would contain all the DST nodes of type ``vtype``,
    as well as the nodes that have at least one outbound edge to any DST node.

    Since DST nodes are included in SRC nodes, a common requirement is to fetch
    the DST node features from the SRC nodes features. To avoid expensive sparse lookup,
    the function assures that the DST nodes in both SRC and DST sets have the same ids.
    As a result, given the node feature tensor ``X`` of type ``utype``,
    the following code finds the corresponding DST node features of type ``vtype``:

    .. code::

        X[:block.number_of_nodes('DST/vtype')]

    If the ``dst_nodes`` argument is given, the DST nodes would contain the given nodes.
    Otherwise, the DST nodes would be determined by DGL via the rules above.

    Parameters
    ----------
    g : DGLHeteroGraph
        The graph.
    dst_nodes : Tensor or dict[str, Tensor], optional
        Optional DST nodes. If a tensor is given, the graph must have only one node type.
    include_dst_in_src : bool, default True
        If False, do not include DST nodes in SRC nodes.

    Returns
    -------
    DGLHeteroGraph
        The new graph describing the block.

        The node IDs induced for each type in both sides would be stored in feature
        ``dgl.NID``.

        The edge IDs induced for each type would be stored in feature ``dgl.EID``.

    Notes
    -----
    This function is primarily for creating the structures for efficient
    computation of message passing.  See [TODO] for a detailed example.

    Examples
    --------
    Converting a homogeneous graph to a block as described above:
    >>> g = dgl.graph([(0, 1), (1, 2), (2, 3)])
    >>> block = dgl.to_block(g, torch.LongTensor([3, 2]))

    The right hand side nodes would be exactly the same as the ones given: [3, 2].
    >>> induced_dst = block.dstdata[dgl.NID]
    >>> induced_dst
    tensor([3, 2])

    The first few nodes of the left hand side nodes would also be exactly the same as
    the ones given.  The rest of the nodes are the ones necessary for message passing
    into nodes 3, 2.  This means that the node 1 would be included.
    >>> induced_src = block.srcdata[dgl.NID]
    >>> induced_src
    tensor([3, 2, 1])

    We can notice that the first two nodes are identical to the given nodes as well as
    the right hand side nodes.

    The induced edges can also be obtained by the following:
    >>> block.edata[dgl.EID]
    tensor([2, 1])

    This indicates that edge (2, 3) and (1, 2) are included in the result graph.  We can
    verify that the first edge in the block indeed maps to the edge (2, 3), and the
    second edge in the block indeed maps to the edge (1, 2):
    >>> src, dst = block.edges(order='eid')
    >>> induced_src[src], induced_dst[dst]
    (tensor([2, 1]), tensor([3, 2]))

    Converting a heterogeneous graph to a block is similar, except that when specifying
    the right hand side nodes, you have to give a dict:
    >>> g = dgl.bipartite([(0, 1), (1, 2), (2, 3)], utype='A', vtype='B')

    If you don't specify any node of type A on the right hand side, the node type ``A``
    in the block would have zero nodes on the DST side.
    >>> block = dgl.to_block(g, {'B': torch.LongTensor([3, 2])})
    >>> block.number_of_dst_nodes('A')
    0
    >>> block.number_of_dst_nodes('B')
    2
    >>> block.dstnodes['B'].data[dgl.NID]
    tensor([3, 2])

    The left hand side would contain all the nodes on the right hand side:
    >>> block.srcnodes['B'].data[dgl.NID]
    tensor([3, 2])

    As well as all the nodes that have connections to the nodes on the right hand side:
    >>> block.srcnodes['A'].data[dgl.NID]
    tensor([2, 1])
    """
    if dst_nodes is None:
        # Find all nodes that appeared as destinations
        dst_nodes = defaultdict(list)
        for etype in g.canonical_etypes:
            _, dst = g.edges(etype=etype)
            dst_nodes[etype[2]].append(dst)
        dst_nodes = {ntype: F.unique(F.cat(values, 0)) for ntype, values in dst_nodes.items()}
    elif not isinstance(dst_nodes, Mapping):
        # dst_nodes is a Tensor, check if the g has only one type.
        if len(g.ntypes) > 1:
            raise ValueError(
                'Graph has more than one node type; please specify a dict for dst_nodes.')
        dst_nodes = {g.ntypes[0]: dst_nodes}

    # dst_nodes is now a dict
    dst_nodes_nd = []
    for ntype in g.ntypes:
        nodes = dst_nodes.get(ntype, None)
        if nodes is not None:
            dst_nodes_nd.append(F.zerocopy_to_dgl_ndarray(nodes))
        else:
            dst_nodes_nd.append(nd.NULL[g._idtype_str])

    new_graph_index, src_nodes_nd, induced_edges_nd = _CAPI_DGLToBlock(
        g._graph, dst_nodes_nd, include_dst_in_src)

    # The new graph duplicates the original node types to SRC and DST sets.
    new_ntypes = ([ntype for ntype in g.ntypes], [ntype for ntype in g.ntypes])
    new_graph = DGLHeteroGraph(new_graph_index, new_ntypes, g.etypes)
    assert new_graph.is_unibipartite  # sanity check

    for i, ntype in enumerate(g.ntypes):
        new_graph.srcnodes[ntype].data[NID] = F.zerocopy_from_dgl_ndarray(src_nodes_nd[i])
        if ntype in dst_nodes:
            new_graph.dstnodes[ntype].data[NID] = dst_nodes[ntype]
        else:
            # For empty dst node sets, still create empty mapping arrays.
            new_graph.dstnodes[ntype].data[NID] = F.tensor([], dtype=g.idtype)

    for i, canonical_etype in enumerate(g.canonical_etypes):
        induced_edges = F.zerocopy_from_dgl_ndarray(induced_edges_nd[i])
        utype, etype, vtype = canonical_etype
        new_canonical_etype = (utype, etype, vtype)
        new_graph.edges[new_canonical_etype].data[EID] = induced_edges

    return new_graph

def remove_edges(g, edge_ids):
    """Return a new graph with given edge IDs removed.

    The nodes are preserved.

    Parameters
    ----------
    g : DGLHeteroGraph
        The graph
    edge_ids : Tensor or dict[etypes, Tensor]
        The edge IDs for each edge type.

    Returns
    -------
    DGLHeteroGraph
        The new graph.
        The edge ID mapping from the new graph to the original graph is stored as
        ``dgl.EID`` on edge features.
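
    Examples
    --------
    A minimal sketch (assuming the PyTorch backend), removing the second edge of
    a homogeneous graph:

    >>> g = dgl.graph([(0, 1), (1, 2), (2, 3)])
    >>> new_g = dgl.remove_edges(g, torch.LongTensor([1]))
    >>> new_g.all_edges(form='uv', order='eid')
    (tensor([0, 2]), tensor([1, 3]))
    >>> new_g.edata[dgl.EID]
    tensor([0, 2])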
    """
    if not isinstance(edge_ids, Mapping):
        if len(g.etypes) != 1:
            raise ValueError(
                "Graph has more than one edge type; specify a dict for edge_id instead.")
        edge_ids = {g.canonical_etypes[0]: edge_ids}

    edge_ids_nd = [nd.NULL[g._idtype_str]] * len(g.etypes)
    for key, value in edge_ids.items():
        if value.dtype != g.idtype:
            # Without this check the function would still run, but could return a wrong result.
            raise utils.InconsistentDtypeException(
                "Expect edge id tensors({}) to have the same index type as graph({})".format(
                    value.dtype, g.idtype))
        edge_ids_nd[g.get_etype_id(key)] = F.zerocopy_to_dgl_ndarray(value)
    new_graph_index, induced_eids_nd = _CAPI_DGLRemoveEdges(g._graph, edge_ids_nd)

    new_graph = DGLHeteroGraph(new_graph_index, g.ntypes, g.etypes)
    for i, canonical_etype in enumerate(g.canonical_etypes):
        data = induced_eids_nd[i]
        if len(data) == 0:
            # Empty means that either
            # (1) no edges are removed and edges are not shuffled.
            # (2) all edges are removed.
            # The following statement deals with both cases.
            new_graph.edges[canonical_etype].data[EID] = F.arange(
                0, new_graph.number_of_edges(canonical_etype))
        else:
            new_graph.edges[canonical_etype].data[EID] = F.zerocopy_from_dgl_ndarray(data)

    return new_graph

def in_subgraph(g, nodes):
    """Extract the subgraph containing only the in edges of the given nodes.

    The subgraph keeps the same type schema and the same number of nodes as the original
    graph. Node/edge features are not preserved. The original IDs of
    the extracted edges are stored as the `dgl.EID` feature in the returned graph.

    Parameters
    ----------
    g : DGLHeteroGraph
        Full graph structure.
    nodes : tensor or dict
        The nodes whose incoming edges are extracted. The allowed types
        are a dictionary of node types to node id tensors, or simply a node id tensor if
        the given graph g has only one type of nodes.

    Returns
    -------
    DGLHeteroGraph
        The subgraph.
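
    Examples
    --------
    A minimal sketch (assuming the PyTorch backend), extracting the in edges of
    node 2 of a homogeneous graph:

    >>> g = dgl.graph([(0, 1), (1, 2), (2, 3)])
    >>> sg = dgl.in_subgraph(g, torch.LongTensor([2]))
    >>> sg.all_edges(form='uv', order='eid')
    (tensor([1]), tensor([2]))
    >>> sg.edata[dgl.EID]
    tensor([1])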
    """
    if not isinstance(nodes, dict):
        if len(g.ntypes) > 1:
            raise DGLError("Must specify node type when the graph is not homogeneous.")
        nodes = {g.ntypes[0] : nodes}
    nodes_all_types = []
    for ntype in g.ntypes:
        if ntype in nodes:
            nodes_all_types.append(utils.toindex(nodes[ntype], g._idtype_str).todgltensor())
        else:
            nodes_all_types.append(nd.NULL[g._idtype_str])

    subgidx = _CAPI_DGLInSubgraph(g._graph, nodes_all_types)
    induced_edges = subgidx.induced_edges
    ret = DGLHeteroGraph(subgidx.graph, g.ntypes, g.etypes)
    for i, etype in enumerate(ret.canonical_etypes):
        ret.edges[etype].data[EID] = induced_edges[i].tousertensor()
    return ret

def out_subgraph(g, nodes):
    """Extract the subgraph containing only the out edges of the given nodes.

    The subgraph keeps the same type schema and the same number of nodes as the original
    graph. Node/edge features are not preserved. The original IDs of
    the extracted edges are stored as the `dgl.EID` feature in the returned graph.

    Parameters
    ----------
    g : DGLHeteroGraph
        Full graph structure.
    nodes : tensor or dict
        The nodes whose outgoing edges are extracted. The allowed types
        are a dictionary of node types to node id tensors, or simply a node id tensor if
        the given graph g has only one type of nodes.

    Returns
    -------
    DGLHeteroGraph
        The subgraph.
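
    Examples
    --------
    A minimal sketch (assuming the PyTorch backend), extracting the out edges of
    node 2 of a homogeneous graph:

    >>> g = dgl.graph([(0, 1), (1, 2), (2, 3)])
    >>> sg = dgl.out_subgraph(g, torch.LongTensor([2]))
    >>> sg.all_edges(form='uv', order='eid')
    (tensor([2]), tensor([3]))
    >>> sg.edata[dgl.EID]
    tensor([2])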
    """
    if not isinstance(nodes, dict):
        if len(g.ntypes) > 1:
            raise DGLError("Must specify node type when the graph is not homogeneous.")
        nodes = {g.ntypes[0] : nodes}
    nodes_all_types = []
    for ntype in g.ntypes:
        if ntype in nodes:
            nodes_all_types.append(utils.toindex(nodes[ntype], g._idtype_str).todgltensor())
        else:
            nodes_all_types.append(nd.NULL[g._idtype_str])

    subgidx = _CAPI_DGLOutSubgraph(g._graph, nodes_all_types)
    induced_edges = subgidx.induced_edges
    ret = DGLHeteroGraph(subgidx.graph, g.ntypes, g.etypes)
    for i, etype in enumerate(ret.canonical_etypes):
        ret.edges[etype].data[EID] = induced_edges[i].tousertensor()
    return ret

def to_simple(g, return_counts='count', writeback_mapping=None):
    """Convert a heterogeneous multigraph to a heterogeneous simple graph, coalescing
    duplicate edges into one.

    This function does not preserve node and edge features.

    Parameters
    ----------
    g : DGLHeteroGraph
        The heterogeneous graph
    return_counts : str, optional
        If given, the returned graph would have a column with the same name that stores
        the number of duplicated edges from the original graph.
    writeback_mapping : str, optional
        If given, the mapping from the edge IDs of original graph to those of the returned
        graph would be written into edge feature with this name in the original graph for
        each edge type.

    Returns
    -------
    DGLHeteroGraph
        The new heterogeneous simple graph.

    Examples
    --------
    Consider the following graph
    >>> g = dgl.graph([(0, 1), (1, 3), (2, 2), (1, 3), (1, 4), (1, 4)])
    >>> sg = dgl.to_simple(g, return_counts='weights', writeback_mapping='new_eid')

    The returned graph would have duplicate edges connecting (1, 3) and (1, 4) removed:
    >>> sg.all_edges(form='uv', order='eid')
    (tensor([0, 1, 1, 2]), tensor([1, 3, 4, 2]))

    If ``return_counts`` is set, the returned graph will also return how many edges
    in the original graph are connecting the endpoints of the edges in the new graph:
    >>> sg.edata['weights']
    tensor([1, 2, 2, 1])

    This essentially reads that one edge is connecting (0, 1) in ``g``, whereas 2 edges
    are connecting (1, 3) in ``g``, etc.

    One can also retrieve the mapping from the edges in the original graph to edges in
    the new graph by setting ``writeback_mapping`` and running
    >>> g.edata['new_eid']
    tensor([0, 1, 3, 1, 2, 2])

    This tells us that the first edge in ``g`` is mapped to the first edge in ``sg``, and
    the second and the fourth edge are mapped to the second edge in ``sg``, etc.
    """
    simple_graph_index, counts, edge_maps = _CAPI_DGLToSimpleHetero(g._graph)
    simple_graph = DGLHeteroGraph(simple_graph_index, g.ntypes, g.etypes)
    counts = [F.zerocopy_from_dgl_ndarray(count) for count in counts]
    edge_maps = [F.zerocopy_from_dgl_ndarray(edge_map) for edge_map in edge_maps]

    if return_counts is not None:
        for count, canonical_etype in zip(counts, g.canonical_etypes):
            simple_graph.edges[canonical_etype].data[return_counts] = count

    if writeback_mapping is not None:
        for edge_map, canonical_etype in zip(edge_maps, g.canonical_etypes):
            g.edges[canonical_etype].data[writeback_mapping] = edge_map

    return simple_graph

def as_heterograph(g, ntype='_U', etype='_E'):
    """Convert a DGLGraph to a DGLHeteroGraph with one node and edge type.

    Node and edge features are preserved. The returned graph uses 64-bit node and edge IDs.

    Parameters
    ----------
    g : DGLGraph
        The graph
    ntype : str, optional
        The node type name
    etype : str, optional
        The edge type name

    Returns
    -------
    DGLHeteroGraph
        The heterograph.
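
    Examples
    --------
    A minimal sketch (assuming the PyTorch backend and a readonly input graph):

    >>> g = dgl.DGLGraph([(0, 1), (1, 2)], readonly=True)
    >>> hg = dgl.as_heterograph(g)
    >>> hg.ntypes, hg.etypes
    (['_U'], ['_E'])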
    """
    hgi = _CAPI_DGLAsHeteroGraph(g._graph)
    hg = DGLHeteroGraph(hgi, [ntype], [etype])
    hg.ndata.update(g.ndata)
    hg.edata.update(g.edata)
    return hg

def as_immutable_graph(hg):
    """Convert a DGLHeteroGraph with one node and edge type into a DGLGraph.

    Node and edge features are preserved.

    Parameters
    ----------
    g : DGLHeteroGraph
        The heterograph

    Returns
    -------
    DGLGraph
        The graph.
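
    Examples
    --------
    A minimal sketch (assuming the PyTorch backend):

    >>> hg = dgl.graph([(0, 1), (1, 2)])
    >>> g = dgl.as_immutable_graph(hg)
    >>> g.number_of_nodes(), g.number_of_edges()
    (3, 2)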
    """
    gidx = _CAPI_DGLAsImmutableGraph(hg._graph)
    g = DGLGraph(gidx)
    g.ndata.update(hg.ndata)
    g.edata.update(hg.edata)
    return g

_init_api("dgl.transform")