import gc
import logging
import math
import os
import sys
from datetime import timedelta
from timeit import default_timer as timer

import constants

import dgl
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from convert_partition import create_dgl_object, create_metadata_json
from dataset_utils import get_dataset
from dist_lookup import DistLookupService
from globalids import (
    assign_shuffle_global_nids_edges,
    assign_shuffle_global_nids_nodes,
    lookup_shuffle_global_nids_edges,
)
from gloo_wrapper import allgather_sizes, alltoallv_cpu, gather_metadata_json
from utils import (
    augment_edge_data,
    get_edge_types,
    get_etype_featnames,
    get_gnid_range_map,
    get_idranges,
    get_node_types,
    get_ntype_featnames,
    map_partid_rank,
    memory_snapshot,
    read_json,
    read_ntype_partition_files,
    write_dgl_objects,
    write_metadata_json,
)


def gen_node_data(
    rank, world_size, num_parts, id_lookup, ntid_ntype_map, schema_map
):
    """
    For this data processing pipeline, reading node files is not needed. All the needed information about
    the nodes can be found in the metadata json file. This function generates the nodes owned by a given
    process, using metis partitions.

    Parameters:
    -----------
    rank : int
        rank of the process
    world_size : int
        total no. of processes
    num_parts : int
        total no. of partitions
    id_lookup : instance of class DistLookupService
       Distributed lookup service used to map global-nids to respective partition-ids and
       shuffle-global-nids
    ntid_ntype_map : dictionary
        a dictionary where keys are node_type ids (integers) and values are node_type names (strings).
    schema_map : dictionary
        dictionary formed by reading the input metadata json file for the input dataset.

        Please note that it is assumed that, for the input graph files, the nodes of a particular node-type are
        split into `p` files (because `p` partitions are to be generated). Similarly, the edges of a particular
        edge-type are split into `p` files as well.

        # assuming m node-types are present in the input graph
        "num_nodes_per_chunk" : [
            [a0, a1, a2, ... a<p-1>],
            [b0, b1, b2, ... b<p-1>],
            ...
            [m0, m1, m2, ... m<p-1>]
        ]

        Here each sub-list, corresponding to a node-type in the input graph, has `p` elements, for instance [a0, a1, ... a<p-1>],
        where each element represents the number of nodes which are to be processed by a process during distributed partitioning.
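
        As an illustration only (toy values, not taken from any real dataset): with `p = 2` partitions and a
        single node-type whose sub-list is [100, 150], chunk 0 covers per-type node ids [0, 100) and chunk 1
        covers [100, 250); if that node-type is the first entry in "node_type", its global-nid range is
        [0, 250) as well.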

        In addition to the above key-value pair for the nodes in the graph, the node-features are captured in the
        "node_data" key-value pair. In this dictionary the keys are node-type names and each value is a dictionary which
        is used to capture all the features present for that particular node-type. This is shown in the following example:

        "node_data" : {
            "paper": {       # node type
                "feat": {   # feature key
                    "format": {"name": "numpy"},
                    "data": ["node_data/paper-feat-part1.npy", "node_data/paper-feat-part2.npy"]
                },
                "label": {   # feature key
                    "format": {"name": "numpy"},
                    "data": ["node_data/paper-label-part1.npy", "node_data/paper-label-part2.npy"]
                },
                "year": {   # feature key
                    "format": {"name": "numpy"},
                    "data": ["node_data/paper-year-part1.npy", "node_data/paper-year-part2.npy"]
                }
            }
        }
        In the above example we have one node-type, paper, and it has 3 features, namely feat, label and year.
        Each feature has `p` files whose locations in the filesystem are listed under the key "data", and "format" is used to
        describe the storage format.

    Returns:
    --------
    dictionary :
        dictionary where keys are column names and values are numpy arrays; these arrays are generated
        using information present in the metadata json file

    """
    local_node_data = {}
    for local_part_id in range(num_parts // world_size):
        local_node_data[constants.GLOBAL_NID + "/" + str(local_part_id)] = []
        local_node_data[constants.NTYPE_ID + "/" + str(local_part_id)] = []
        local_node_data[
            constants.GLOBAL_TYPE_NID + "/" + str(local_part_id)
        ] = []

    # Note that `get_idranges` always returns two dictionaries. Keys in these
    # dictionaries are type names for nodes and edges, and values are
    # `num_parts` tuples indicating the range of type-ids in the first
    # dictionary and the range of global-nids in the second dictionary.
    type_nid_dict, global_nid_dict = get_idranges(
        schema_map[constants.STR_NODE_TYPE],
        schema_map[constants.STR_NUM_NODES_PER_CHUNK],
        num_chunks=num_parts,
    )

    for ntype_id, ntype_name in ntid_ntype_map.items():
        # No. of nodes in each process can differ significantly in lopsided distributions
        # Synchronize on a per ntype basis
        dist.barrier()

        type_start, type_end = (
            type_nid_dict[ntype_name][0][0],
            type_nid_dict[ntype_name][-1][1],
        )
        gnid_start, gnid_end = (
            global_nid_dict[ntype_name][0, 0],
            global_nid_dict[ntype_name][0, 1],
        )

        node_partid_slice = id_lookup.get_partition_ids(
            np.arange(gnid_start, gnid_end, dtype=np.int64)
        )  # exclusive

        for local_part_id in range(num_parts // world_size):
            cond = node_partid_slice == (rank + local_part_id * world_size)
            own_gnids = np.arange(gnid_start, gnid_end, dtype=np.int64)
            own_gnids = own_gnids[cond]

            own_tnids = np.arange(type_start, type_end, dtype=np.int64)
            own_tnids = own_tnids[cond]

            local_node_data[
                constants.NTYPE_ID + "/" + str(local_part_id)
            ].append(np.ones(own_gnids.shape, dtype=np.int64) * ntype_id)
            local_node_data[
                constants.GLOBAL_NID + "/" + str(local_part_id)
            ].append(own_gnids)
            local_node_data[
                constants.GLOBAL_TYPE_NID + "/" + str(local_part_id)
            ].append(own_tnids)

    for k in local_node_data.keys():
        local_node_data[k] = np.concatenate(local_node_data[k])

    return local_node_data


def exchange_edge_data(rank, world_size, num_parts, edge_data, id_lookup):
    """
    Exchange edge_data among processes in the world.
    Prepare a list of sliced data targeting each process and invoke
    alltoallv_cpu to perform the message exchange.

    Parameters:
    -----------
    rank : int
        rank of the process
    world_size : int
        total no. of processes
    num_parts : int
        total no. of partitions
    edge_data : dictionary
        edge information, as a dictionary which stores column names as keys and values
        as column data. This information is read from the edges.txt file.
    id_lookup : instance of class DistLookupService
       Distributed lookup service used to map global-nids to respective partition-ids and
       shuffle-global-nids

    Returns:
    --------
    dictionary :
        the input argument, edge_data, is updated with the edge data received from other processes
        in the world.
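
    As a toy illustration (hypothetical values), the incoming edge_data is expected to hold per-edge
    columns keyed by the constants used below, e.g.

        edge_data = {
            constants.GLOBAL_SRC_ID: np.array([0, 5, 7]),
            constants.GLOBAL_DST_ID: np.array([5, 7, 0]),
            constants.GLOBAL_TYPE_EID: np.array([0, 1, 0]),
            constants.ETYPE_ID: np.array([0, 0, 1]),
            constants.GLOBAL_EID: np.array([10, 11, 12]),
        }

    and on return those keys are replaced by "<column>/<local_part_id>" keys which hold only the edges
    owned by this rank.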
    """

    # Synchronize at the beginning of this function
    dist.barrier()

    # Prepare data for each rank in the cluster.
    start = timer()

    CHUNK_SIZE = 100 * 1000 * 1000  # 100M edges x 5 columns x 8 bytes = 4 GB/message/node
    num_edges = edge_data[constants.GLOBAL_SRC_ID].shape[0]
    all_counts = allgather_sizes(
        [num_edges], world_size, num_parts, return_sizes=True
    )
    max_edges = np.amax(all_counts)
    all_edges = np.sum(all_counts)
    num_chunks = (max_edges // CHUNK_SIZE) + (
        0 if (max_edges % CHUNK_SIZE == 0) else 1
    )
    LOCAL_CHUNK_SIZE = (num_edges // num_chunks) + (
        0 if (num_edges % num_chunks == 0) else 1
    )
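
    # Illustrative arithmetic only (toy numbers, not from a real run): if
    # max_edges = 250,000,000 with CHUNK_SIZE = 100,000,000 then num_chunks = 3,
    # and a rank holding num_edges = 90,000,000 edges sends
    # LOCAL_CHUNK_SIZE = 30,000,000 of its edges per round, so every rank
    # participates in the same number of alltoallv rounds.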
    logging.info(
        f"[Rank: {rank}] Edge Data Shuffle - max_edges: {max_edges}, \
                        local_edges: {num_edges} and num_chunks: {num_chunks} \
                        Total edges: {all_edges} Local_CHUNK_SIZE: {LOCAL_CHUNK_SIZE}"
    )

    # Start sending the chunks to the rest of the processes
    for local_part_id in range(num_parts // world_size):
        local_src_ids = []
        local_dst_ids = []
        local_type_eids = []
        local_etype_ids = []
        local_eids = []

        for chunk in range(num_chunks):
            # Slice bounds for this chunk of local edges; named so that the
            # `start` timer captured above is not overwritten.
            idx_start = chunk * LOCAL_CHUNK_SIZE
            idx_end = (chunk + 1) * LOCAL_CHUNK_SIZE

            logging.info(
                f"[Rank: {rank}] EdgeData Shuffle: processing \
                    local_part_id: {local_part_id} and chunkid: {chunk}"
            )
            cur_src_id = edge_data[constants.GLOBAL_SRC_ID][idx_start:idx_end]
            cur_dst_id = edge_data[constants.GLOBAL_DST_ID][idx_start:idx_end]
            cur_type_eid = edge_data[constants.GLOBAL_TYPE_EID][idx_start:idx_end]
            cur_etype_id = edge_data[constants.ETYPE_ID][idx_start:idx_end]
            cur_eid = edge_data[constants.GLOBAL_EID][idx_start:idx_end]

            input_list = []
            owner_ids = id_lookup.get_partition_ids(cur_dst_id)
            for idx in range(world_size):
                send_idx = owner_ids == (idx + local_part_id * world_size)
                send_idx = send_idx.reshape(cur_src_id.shape[0])
                filt_data = np.column_stack(
                    (
                        cur_src_id[send_idx == 1],
                        cur_dst_id[send_idx == 1],
                        cur_type_eid[send_idx == 1],
                        cur_etype_id[send_idx == 1],
                        cur_eid[send_idx == 1],
                    )
                )
                if filt_data.shape[0] <= 0:
                    input_list.append(torch.empty((0, 5), dtype=torch.int64))
                else:
                    input_list.append(torch.from_numpy(filt_data))

            # Now send newly formed chunk to others.
            dist.barrier()
            output_list = alltoallv_cpu(
                rank, world_size, input_list, retain_nones=False
            )

            # Replace the values of edge_data with the data received from all the other processes.
            rcvd_edge_data = torch.cat(output_list).numpy()
            local_src_ids.append(rcvd_edge_data[:, 0])
            local_dst_ids.append(rcvd_edge_data[:, 1])
            local_type_eids.append(rcvd_edge_data[:, 2])
            local_etype_ids.append(rcvd_edge_data[:, 3])
            local_eids.append(rcvd_edge_data[:, 4])

        edge_data[
            constants.GLOBAL_SRC_ID + "/" + str(local_part_id)
        ] = np.concatenate(local_src_ids)
        edge_data[
            constants.GLOBAL_DST_ID + "/" + str(local_part_id)
        ] = np.concatenate(local_dst_ids)
        edge_data[
            constants.GLOBAL_TYPE_EID + "/" + str(local_part_id)
        ] = np.concatenate(local_type_eids)
        edge_data[
            constants.ETYPE_ID + "/" + str(local_part_id)
        ] = np.concatenate(local_etype_ids)
        edge_data[
            constants.GLOBAL_EID + "/" + str(local_part_id)
        ] = np.concatenate(local_eids)

    # Check if the data was exchanged correctly
    local_edge_count = 0
    for local_part_id in range(num_parts // world_size):
        local_edge_count += edge_data[
            constants.GLOBAL_SRC_ID + "/" + str(local_part_id)
        ].shape[0]
    shuffle_edge_counts = allgather_sizes(
        [local_edge_count], world_size, num_parts, return_sizes=True
    )
    shuffle_edge_total = np.sum(shuffle_edge_counts)
    assert shuffle_edge_total == all_edges

    end = timer()
    logging.info(
        f"[Rank: {rank}] Time to send/rcv edge data: {timedelta(seconds=end-start)}"
    )

    # Clean up.
    edge_data.pop(constants.GLOBAL_SRC_ID)
    edge_data.pop(constants.GLOBAL_DST_ID)
    edge_data.pop(constants.GLOBAL_TYPE_EID)
    edge_data.pop(constants.ETYPE_ID)
    edge_data.pop(constants.GLOBAL_EID)

    return edge_data


def exchange_feature(
    rank,
    data,
    id_lookup,
    feat_type,
    feat_key,
    featdata_key,
    gid_start,
    gid_end,
    type_id_start,
    type_id_end,
    local_part_id,
    world_size,
    num_parts,
    cur_features,
    cur_global_ids,
):
    """This function is used to send/receive one feature for either nodes or
    edges of the input graph dataset.

    Parameters:
    -----------
    rank : int
        integer, unique id assigned to the current process
    data : dictionary
        dictionary in which node or edge features are stored; this information
        is read from the appropriate node features file which belongs to the
        current process
    id_lookup : instance of DistLookupService
        instance of an implementation of dist. lookup service to retrieve values
        for keys
    feat_type : string
        this is used to distinguish which features are being exchanged. Please
        note that for nodes ownership is clearly defined and for edges it is
        always assumed that the destination end point of the edge defines the
        ownership of that particular edge
    feat_key : string
        this string is used as a key in the dictionary to store features, as
        tensors, in local dictionaries
    featdata_key : numpy array
        features associated with this feature key being processed
    gid_start : int
        starting global_id, of either node or edge, for the feature data
    gid_end : int
        ending global_id, of either node or edge, for the feature data
    type_id_start : int
        starting type_id for the feature data
    type_id_end : int
        ending type_id for the feature data
    local_part_id : int
        integers used to the identify the local partition id used to locate
        data belonging to this partition
    world_size : int
        total number of processes created
    num_parts : int
        total number of partitions
    cur_features : dictionary
        dictionary to store the feature data which belongs to the current
        process
    cur_global_ids : dictionary
        dictionary to store global ids, of either nodes or edges, for which
        the features are stored in the cur_features dictionary

    Returns:
    -------
    dictionary :
        a dictionary is returned where keys are type names and
        feature data are the values
    dictionary :
        a dictionary of global_ids, of either nodes or edges, whose features are
        received during the data shuffle process
    """
    # type_ids for this feature subset on the current rank
    gids_feat = np.arange(gid_start, gid_end)
    tids_feat = np.arange(type_id_start, type_id_end)
    local_idx = np.arange(0, type_id_end - type_id_start)

    feats_per_rank = []
    global_id_per_rank = []

    tokens = feat_key.split("/")
    assert len(tokens) == 3

    local_feat_key = "/".join(tokens[:-1]) + "/" + str(local_part_id)
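
    # For example (hypothetical key), a feat_key of "paper/feat/0" becomes a
    # local_feat_key of "paper/feat/<local_part_id>", so the received features
    # are stored under the local partition currently being processed.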
    # Get the partition ids for the range of global nids.
    if feat_type == constants.STR_NODE_FEATURES:
        # Retrieve the partition ids for the node features.
        # Each partition id will be in the range [0, num_parts).
        partid_slice = id_lookup.get_partition_ids(
            np.arange(gid_start, gid_end, dtype=np.int64)
        )
    else:
        # Edge data case.
        # Ownership is determined by the destination node.
        assert data is not None
        global_eids = np.arange(gid_start, gid_end, dtype=np.int64)

        # Now use `data` to extract destination nodes' global id
        # and use that to get the ownership
        common, idx1, idx2 = np.intersect1d(
            data[constants.GLOBAL_EID], global_eids, return_indices=True
        )
        assert common.shape[0] == idx2.shape[0]

        global_dst_nids = data[constants.GLOBAL_DST_ID][idx1]
        assert np.all(global_eids == data[constants.GLOBAL_EID][idx1])
        partid_slice = id_lookup.get_partition_ids(global_dst_nids)

    for idx in range(world_size):
        cond = partid_slice == (idx + local_part_id * world_size)
        gids_per_partid = gids_feat[cond]
        tids_per_partid = tids_feat[cond]
        local_idx_partid = local_idx[cond]

        if gids_per_partid.shape[0] == 0:
            feats_per_rank.append(torch.empty((0, 1), dtype=torch.float))
            global_id_per_rank.append(torch.empty((0, 1), dtype=torch.int64))
        else:
            feats_per_rank.append(featdata_key[local_idx_partid])
            global_id_per_rank.append(
                torch.from_numpy(gids_per_partid).type(torch.int64)
            )

    # features (and global nids) per rank to be sent out are ready
    # for transmission, perform alltoallv here.
    output_feat_list = alltoallv_cpu(
        rank, world_size, feats_per_rank, retain_nones=False
    )
    output_id_list = alltoallv_cpu(
        rank, world_size, global_id_per_rank, retain_nones=False
    )
    assert len(output_feat_list) == len(output_id_list), (
        "Length of feature list and id list are expected to be equal while "
        f"got {len(output_feat_list)} and {len(output_id_list)}."
    )

    # stitch node_features together to form one large feature tensor
    if len(output_feat_list) > 0:
        output_feat_list = torch.cat(output_feat_list)
        output_id_list = torch.cat(output_id_list)
        if local_feat_key in cur_features:
            temp = cur_features[local_feat_key]
            cur_features[local_feat_key] = torch.cat([temp, output_feat_list])
            temp = cur_global_ids[local_feat_key]
            cur_global_ids[local_feat_key] = torch.cat([temp, output_id_list])
        else:
            cur_features[local_feat_key] = output_feat_list
            cur_global_ids[local_feat_key] = output_id_list

    return cur_features, cur_global_ids


def exchange_features(
    rank,
    world_size,
    num_parts,
    feature_tids,
    type_id_map,
    id_lookup,
    feature_data,
    feat_type,
    data,
):
    """
    This function is used to shuffle node features so that each process will receive
    all the node features whose corresponding nodes are owned by that process.

    The mapping procedure to identify the owner process is not straightforward. The
    following steps are used to identify the owner processes for the locally read node
    features.
    a. Compute the global_nids for the locally read node features. Here the metadata json file
        is used to identify the corresponding global_nids. Please note that the initial graph input
        nodes.txt files are sorted based on node_types.
    b. Using global_nids and metis partitions, owner processes can be easily identified.
    c. Now each process sends the global_nids for which shuffle_global_nids need to be
        retrieved.
    d. After receiving the corresponding shuffle_global_nids, these ids are added to the
        node_data and edge_data dictionaries.

    This pipeline assumes all the input data is in numpy format, except node/edge features which
    are maintained as tensors throughout the various stages of the pipeline execution.

    Parameters:
    -----------
    rank : int
        rank of the current process
    world_size : int
        total no. of participating processes.
    num_parts : int
        total no. of partitions
    feature_tids : dictionary
        dictionary with keys as node-type names with suffixes as feature names
        and value is a dictionary. This dictionary contains information about
        node-features associated with a given node-type, where the value is a list.
        This list contains ranges of indexes, like [starting-idx, ending-idx), which
        can be used to index into the node feature tensors read from the
        corresponding input files.
    type_id_map : dictionary
        mapping between type names and global_ids, of either nodes or edges,
        which belong to the keys in this dictionary
    id_lookup : instance of class DistLookupService
       Distributed lookup service used to map global-nids to respective
       partition-ids and shuffle-global-nids
    feat_type : string
        this is used to distinguish which features are being exchanged. Please
        note that for nodes ownership is clearly defined and for edges it is
        always assumed that the destination end point of the edge defines the
        ownership of that particular edge
    data : dictionary
        dictionary in which node or edge features are stored; this information
        is read from the appropriate node features file which belongs to the
        current process

    Returns:
    --------
    dictionary :
        a dictionary is returned where keys are type names and
        feature data are the values
    dictionary :
        a dictionary of global_ids, of either nodes or edges, whose features are
        received during the data shuffle process
    """
    start = timer()
    own_features = {}
    own_global_ids = {}

    # To iterate over the node_types and associated node_features
    for feat_key, type_info in feature_tids.items():
        # To iterate over the feature data of a given (node or edge) type
        # type_info is a list of 3 elements (as shown below):
        #   [feature-name, starting-idx, ending-idx]
        #       feature-name is the name given to the feature-data,
        #       read from the input metadata file
        #       [starting-idx, ending-idx) specifies the range of indexes
        #        associated with the features data
        # Determine the owner process for these features.
        # Note that the keys in the node features (and similarly edge features)
        # dictionary is of the following format:
        #   `node_type/feature_name/local_part_id`:
        #    where node_type and feature_name are self-explanatory and
        #    local_part_id denotes the partition-id, in the local process,
        #    which will be used as a suffix to store all the information of a
        #    given partition which is processed by the current process. Its
        #    values start from 0 onwards, for instance 0, 1, 2 ... etc.
        #    local_part_id can be mapped to a global partition id very
        #    easily, using cyclic ordering. All local_part_ids = 0 from all
        #    processes will form global partition-ids between 0 and world_size-1.
        #    Similarly all local_part_ids = 1 from all processes will form
        #    global partition ids in the range [world_size, 2*world_size-1] and
        #    so on.
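        #    A toy illustration of this cyclic mapping (values chosen for
        #    illustration only): with world_size = 4 and num_parts = 8,
        #      rank 1, local_part_id 0 -> global partition 1
        #      rank 1, local_part_id 1 -> global partition 5
        #    i.e. global_part_id = rank + local_part_id * world_size.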
        tokens = feat_key.split("/")
        assert len(tokens) == 3
        type_name = tokens[0]
        feat_name = tokens[1]
        logging.info(f"[Rank: {rank}] processing feature: {feat_key}")

        for feat_info in type_info:
            # Compute the global_id range for this feature data
            type_id_start = int(feat_info[0])
            type_id_end = int(feat_info[1])
            begin_global_id = type_id_map[type_name][0]
            gid_start = begin_global_id + type_id_start
            gid_end = begin_global_id + type_id_end

            # Check if features exist for this type_name + feat_name.
            # This check should always pass, because feature_tids are built
            # by reading the input metadata json file for existing features.
            assert feat_key in feature_data

            for local_part_id in range(num_parts // world_size):
                featdata_key = feature_data[feat_key]

                # Synchronize for each feature
                dist.barrier()
                own_features, own_global_ids = exchange_feature(
                    rank,
                    data,
                    id_lookup,
                    feat_type,
                    feat_key,
                    featdata_key,
                    gid_start,
                    gid_end,
                    type_id_start,
                    type_id_end,
                    local_part_id,
                    world_size,
                    num_parts,
                    own_features,
                    own_global_ids,
                )

    end = timer()
    logging.info(
        f"[Rank: {rank}] Total time for feature exchange: {timedelta(seconds = end - start)}"
    )
    return own_features, own_global_ids


def exchange_graph_data(
    rank,
    world_size,
    num_parts,
    node_features,
    edge_features,
    node_feat_tids,
    edge_feat_tids,
    edge_data,
    id_lookup,
    ntypes_ntypeid_map,
    ntypes_gnid_range_map,
    etypes_geid_range_map,
    ntid_ntype_map,
    schema_map,
):
    """
    Wrapper function which is used to shuffle graph data on all the processes.

    Parameters:
    -----------
    rank : int
        rank of the current process
    world_size : int
        total no. of participating processes.
    num_parts : int
        total no. of graph partitions.
    node_features : dictionary
        dictionary where node_features are stored; this information is read from the appropriate
        node features file which belongs to the current process
    edge_features : dictionary
        dictionary where edge_features are stored. This information is read from the appropriate
        edge feature files whose ownership is assigned to the current process
    node_feat_tids: dictionary
        in which keys are node-type names and values are triplets. Each triplet has node-feature name
        and the starting and ending type ids of the node-feature data read from the corresponding
        node feature data file read by current process. Each node type may have several features and
        hence each key may have several triplets.
    edge_feat_tids : dictionary
        a dictionary in which keys are edge-type names and values are triplets of the format
        <feat-name, start-per-type-idx, end-per-type-idx>. This triplet is used to identify
        the chunk of feature data for which the current process is responsible
    edge_data : dictionary
        dictionary which is used to store edge information as read from appropriate files assigned
        to each process.
    id_lookup : instance of class DistLookupService
       Distributed lookup service used to map global-nids to respective partition-ids and
       shuffle-global-nids
    ntypes_ntypeid_map : dictionary
        mappings between node type names and node type ids
    ntypes_gnid_range_map : dictionary
        mapping between node type names and global_nids which belong to the keys in this dictionary
    etypes_geid_range_map : dictionary
        mapping between edge type names and global_eids which are assigned to the edges of this
        edge_type
    ntid_ntype_map : dictionary
        mapping between node type ids and node type names
    schema_map : dictionary
        is the data structure read from the metadata json file for the input graph

    Returns:
    --------
    dictionary :
        the input argument, node_data dictionary, is updated with the node data received from other processes
        in the world. The node data is received by each rank in the process of data shuffling.
    dictionary :
        node features dictionary which has node features for the nodes which are owned by the current
        process
    dictionary :
        list of global_nids for the nodes whose node features are received when node features shuffling was
        performed in the `exchange_features` function call
    dictionary :
        the input argument, edge_data dictionary, is updated with the edge data received from other processes
        in the world. The edge data is received by each rank in the process of data shuffling.
    dictionary :
        edge features dictionary which has edge features. The destination end points of these edges
        are owned by the current process
    dictionary :
        list of global_eids for the edges whose edge features are received when edge features shuffling
        was performed in the `exchange_features` function call
    """
    memory_snapshot("ShuffleNodeFeaturesBegin: ", rank)
    rcvd_node_features, rcvd_global_nids = exchange_features(
        rank,
        world_size,
        num_parts,
        node_feat_tids,
        ntypes_gnid_range_map,
        id_lookup,
        node_features,
        constants.STR_NODE_FEATURES,
        None,
    )
    dist.barrier()
    memory_snapshot("ShuffleNodeFeaturesComplete: ", rank)
    logging.info(f"[Rank: {rank}] Done with node features exchange.")

    rcvd_edge_features, rcvd_global_eids = exchange_features(
        rank,
        world_size,
        num_parts,
        edge_feat_tids,
        etypes_geid_range_map,
        id_lookup,
        edge_features,
        constants.STR_EDGE_FEATURES,
        edge_data,
    )
    dist.barrier()
    logging.info(f"[Rank: {rank}] Done with edge features exchange.")

    node_data = gen_node_data(
        rank, world_size, num_parts, id_lookup, ntid_ntype_map, schema_map
    )
    dist.barrier()
    memory_snapshot("NodeDataGenerationComplete: ", rank)

    edge_data = exchange_edge_data(
        rank, world_size, num_parts, edge_data, id_lookup
    )
    dist.barrier()
    memory_snapshot("ShuffleEdgeDataComplete: ", rank)
    return (
        node_data,
        rcvd_node_features,
        rcvd_global_nids,
        edge_data,
        rcvd_edge_features,
        rcvd_global_eids,
    )


def read_dataset(rank, world_size, id_lookup, params, schema_map):
    """
    This function gets the dataset and performs post-processing on the data which is read from files.
    Additional information (columns) is added to the nodes metadata, like owner_process and global_nid, which
    is used later when processing this information. For edge data, which is now a dictionary, we add new columns
    like global_edge_id and owner_process. Augmenting these data structures helps in processing them
    when data shuffling is performed.

    Parameters:
    -----------
    rank : int
        rank of the current process
    world_size : int
        total no. of processes instantiated
    id_lookup : instance of class DistLookupService
       Distributed lookup service used to map global-nids to respective partition-ids and
       shuffle-global-nids
    params : argparser object
        argument parser object to access command line arguments
    schema_map : dictionary
        dictionary created by reading the input graph metadata json file

    Returns :
    ---------
    dictionary
        in which keys are node-type names and values are tuples representing the range of ids
        for nodes to be read by the current process
    dictionary
        node features which is a dictionary where keys are feature names and values are feature
        data as multi-dimensional tensors
    dictionary
        in which keys are node-type names and values are triplets. Each triplet has node-feature name
        and the starting and ending type ids of the node-feature data read from the corresponding
        node feature data file read by current process. Each node type may have several features and
        hence each key may have several triplets.
    dictionary
        edge data information is read from edges.txt and additional columns are added such as
        owner process for each edge.
    dictionary
        edge features which is also a dictionary, similar to node features dictionary
    dictionary
        a dictionary in which keys are edge-type names and values are tuples indicating the range of ids
        for edges read by the current process.
    dictionary
        a dictionary in which keys are edge-type names and values are triplets,
        (edge-feature-name, start_type_id, end_type_id). These type_ids are indices in the edge-features
        read by the current process. Note that each edge-type may have several edge-features.
    """
    edge_features = {}
    (
        node_tids,
        node_features,
        node_feat_tids,
        edge_data,
        edge_tids,
        edge_features,
        edge_feat_tids,
    ) = get_dataset(
        params.input_dir,
        params.graph_name,
        rank,
        world_size,
        params.num_parts,
        schema_map,
    )
    # Synchronize so that everybody completes reading dataset from disk
    dist.barrier()
    logging.info(f"[Rank: {rank}] Done reading dataset {params.input_dir}")

    edge_data = augment_edge_data(
        edge_data, id_lookup, edge_tids, rank, world_size, params.num_parts
    )
    logging.info(
        f"[Rank: {rank}] Done augmenting edge_data: {len(edge_data)}, {edge_data[constants.GLOBAL_SRC_ID].shape}"
    )

    return (
        node_tids,
        node_features,
        node_feat_tids,
        edge_data,
        edge_features,
        edge_tids,
        edge_feat_tids,
    )


def reorder_data(num_parts, world_size, data, key):
    """
    Auxiliary function used to sort node and edge data for the input graph.

    Parameters:
    -----------
    num_parts : int
        total no. of partitions
    world_size : int
        total number of nodes used in this execution
    data : dictionary
        which is used to store the node and edge data for the input graph
    key : string
        specifies the column which is used to determine the sort order for
        the remaining columns

    Returns:
    --------
    dictionary
        same as the input dictionary, but with reordered columns (values in
        the dictionary), as per the np.argsort results on the column specified
        by the ``key`` column
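
    Example (illustrative only; the keys and values below are hypothetical):

        data = {
            "ntype/0": np.array([1, 0, 1]),
            "gid/0": np.array([10, 11, 12]),
        }
        data = reorder_data(1, 1, data, "ntype")
        # data["ntype/0"] -> [0, 1, 1], data["gid/0"] -> [11, 10, 12]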
    """
    for local_part_id in range(num_parts // world_size):
        sorted_idx = data[key + "/" + str(local_part_id)].argsort()
        for k, v in data.items():
            tokens = k.split("/")
            assert len(tokens) == 2
            if tokens[1] == str(local_part_id):
                data[k] = v[sorted_idx]
        sorted_idx = None
    gc.collect()
    return data


def gen_dist_partitions(rank, world_size, params):
    """
    Function which will be executed by all Gloo processes to begin execution of the pipeline.
    This function expects the input dataset to be split across multiple files.

    The input dataset and its file structure are described in the metadata json file which is also part of the
    input dataset. On a high level, this metadata json file contains information about the following items:
    a) Nodes metadata. It is assumed that nodes which belong to each node-type are split into p files
       (where `p` is the no. of partitions).
    b) Similarly, edge metadata contains information about edges which are split into p files.
    c) Node and edge features. It is also assumed that each node (and edge) feature, if present, is also
       split into `p` files.

    For example, a sample metadata json file might be as follows:
    (In this toy example, we assume that we have "m" node-types, "k" edge-types, and for node_type = ntype0-name
     we have two features, namely feat0-name and feat1-name. Please note that the node-features are also split into
     `p` files. This will help in load-balancing during the data-shuffling phase).

    Terminology used to identify any particular "id" assigned to nodes, edges or node features: the prefix "global" is
    used to indicate that this information is either read from the input dataset or autogenerated based on the information
    read from the input dataset files. The prefix "type" is used to indicate a unique id, within a given node or edge type,
    assigned to a node or an edge. For instance, type_node_id means a unique id, within a given node type, assigned to a
    node. And the prefix "shuffle" is used to indicate a unique id, across the entire graph, assigned to either a node or
    an edge. For instance, SHUFFLE_GLOBAL_NID means a unique id which is assigned to a node after the data shuffle is
    completed.
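
    As an illustration only (toy numbers, not taken from any real dataset): if the nodes of ntype0-name are
    assigned global-nids starting at 500, then the 6th node of that type has type_node_id = 5 and
    global_nid = 505; after data shuffling, that same node is assigned a new, globally unique
    SHUFFLE_GLOBAL_NID by the pipeline.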

    Some high-level notes on the structure of the metadata json file.
    1. path(s) mentioned in the entries for nodes, edges and node-features files can be either absolute or relative.
       If these paths are relative, then it is assumed that they are relative to the folder from which the execution is
       launched.
    2. The id_startx and id_endx represent the type_node_id and type_edge_id respectively for nodes and edge data. This
       means that these ids should match the no. of nodes/edges read from any given file. Since these are type_ids for
       the nodes and edges in any given file, their global_ids can be easily computed as well.

    {
        "graph_name" : xyz,
        "node_type" : ["ntype0-name", "ntype1-name", ....], #m node types
        "num_nodes_per_chunk" : [
            [a0, a1, ...a<p-1>], #p partitions
            [b0, b1, ... b<p-1>],
            ....
            [c0, c1, ..., c<p-1>] #no, of node types
        ],
        "edge_type" : ["src_ntype:edge_type:dst_ntype", ....], #k edge types
        "num_edges_per_chunk" : [
            [a0, a1, ...a<p-1>], #p partitions
            [b0, b1, ... b<p-1>],
            ....
            [c0, c1, ..., c<p-1>] #no, of edge types
        ],
        "node_data" : {
            "ntype0-name" : {
                "feat0-name" : {
                    "format" : {"name": "numpy"},
                    "data" :   [ #list of lists
                        ["<path>/feat-0.npy", 0, id_end0],
                        ["<path>/feat-1.npy", id_start1, id_end1],
                        ....
                        ["<path>/feat-<p-1>.npy", id_start<p-1>, id_end<p-1>]
                    ]
                },
                "feat1-name" : {
                    "format" : {"name": "numpy"},
                    "data" : [ #list of lists
                        ["<path>/feat-0.npy", 0, id_end0],
                        ["<path>/feat-1.npy", id_start1, id_end1],
                        ....
                        ["<path>/feat-<p-1>.npy", id_start<p-1>, id_end<p-1>]
                    ]
                }
            }
        },
        "edges": { #k edge types
            "src_ntype:etype0-name:dst_ntype" : {
                "format": {"name" : "csv", "delimiter" : " "},
                "data" : [
                    ["<path>/etype0-name-0.txt", 0, id_end0], #These are type_edge_ids for edges of this type
                    ["<path>/etype0-name-1.txt", id_start1, id_end1],
                    ...,
                    ["<path>/etype0-name-<p-1>.txt", id_start<p-1>, id_end<p-1>]
                ]
            },
            ...,
            "src_ntype:etype<k-1>-name:dst_ntype" : {
                "format": {"name" : "csv", "delimiter" : " "},
                "data" : [
                    ["<path>/etype<k-1>-name-0.txt", 0, id_end0],
                    ["<path>/etype<k-1>-name-1.txt", id_start1, id_end1],
                    ...,
                    ["<path>/etype<k-1>-name-<p-1>.txt", id_start<p-1>, id_end<p-1>]
                ]
            },
        },
    }

    The function performs the following steps:
    1. Reads the metis partitions to identify the owner process of all the nodes in the entire graph.
    2. Reads the input dataset; each participating process will map to a single file for the edges,
        node-features and edge-features for each node-type and edge-types respectively. Using nodes metadata
        information, nodes which are owned by a given process are generated to optimize communication to some
        extent.
    3. Now each process shuffles the data by identifying the respective owner processes using metis
        partitions.
        a. To identify owner processes for nodes, metis partitions will be used.
        b. For edges, the owner process of the destination node will be the owner of the edge as well.
        c. For node and edge features, identifying the owner process is a little bit involved.
            For this purpose, graph metadata json file is used to first map the locally read node features
            to their global_nids. Now owner process is identified using metis partitions for these global_nids
            to retrieve shuffle_global_nids. A similar process is used for edge_features as well.
        d. After all the data shuffling is done, the order of node-features may be different when compared to
            their global_type_nids. Node- and edge-data are ordered by node-type and edge-type respectively.
            And now node features and edge features are re-ordered to match the order of their node- and edge-types.
    4. Last step is to create the DGL objects with the data present on each of the processes.
        a. DGL objects for nodes, edges, node- and edge- features.
        b. Metadata is gathered from each process to create the global metadata json file, by process rank = 0.

    Parameters:
    ----------
    rank : int
        integer representing the rank of the current process in a typical distributed implementation
    world_size : int
        integer representing the total no. of participating processes in a typical distributed implementation
    params : argparser object
        this object, key value pairs, provides access to the command line arguments from the runtime environment
    """
    global_start = timer()
    logging.info(
        f"[Rank: {rank}] Starting distributed data processing pipeline..."
    )
    memory_snapshot("Pipeline Begin: ", rank)
    # init processing
    schema_map = read_json(os.path.join(params.input_dir, params.schema))

    # Initialize distributed lookup service for partition-id and shuffle-global-nids mappings
    # for global-nids
    _, global_nid_ranges = get_idranges(
        schema_map[constants.STR_NODE_TYPE],
        schema_map[constants.STR_NUM_NODES_PER_CHUNK],
        params.num_parts,
    )
    id_map = dgl.distributed.id_map.IdMap(global_nid_ranges)

    # The resources, which are node-id to partition-id mappings, are split
    # into `world_size` number of parts, where each part can be mapped to
    # each physical node.
    id_lookup = DistLookupService(
        os.path.join(params.input_dir, params.partitions_dir),
        schema_map[constants.STR_NODE_TYPE],
        id_map,
        rank,
        world_size,
        params.num_parts,
    )

    ntypes_ntypeid_map, ntypes, ntypeid_ntypes_map = get_node_types(schema_map)
    etypes_etypeid_map, etypes, etypeid_etypes_map = get_edge_types(schema_map)
    logging.info(
        f"[Rank: {rank}] Initialized metis partitions and node_types map..."
    )

    # read input graph files and augment these datastructures with
    # appropriate information (global_nid and owner process) for node and edge data
    (
        node_tids,
        node_features,
        node_feat_tids,
        edge_data,
        edge_features,
        edge_tids,
        edge_feat_tids,
    ) = read_dataset(rank, world_size, id_lookup, params, schema_map)
    logging.info(
        f"[Rank: {rank}] Done augmenting file input data with auxiliary columns"
    )
    memory_snapshot("DatasetReadComplete: ", rank)

    # send out node and edge data --- and appropriate features.
    # this function will also stitch the data recvd from other processes
    # and return the aggregated data
    ntypes_gnid_range_map = get_gnid_range_map(node_tids)
    etypes_geid_range_map = get_gnid_range_map(edge_tids)
    (
        node_data,
        rcvd_node_features,
        rcvd_global_nids,
        edge_data,
        rcvd_edge_features,
        rcvd_global_eids,
    ) = exchange_graph_data(
        rank,
        world_size,
        params.num_parts,
        node_features,
        edge_features,
        node_feat_tids,
        edge_feat_tids,
        edge_data,
        id_lookup,
        ntypes_ntypeid_map,
        ntypes_gnid_range_map,
        etypes_geid_range_map,
        ntypeid_ntypes_map,
        schema_map,
    )
    gc.collect()
    logging.info(f"[Rank: {rank}] Done with data shuffling...")
    memory_snapshot("DataShuffleComplete: ", rank)

    # sort node_data by ntype
    node_data = reorder_data(
        params.num_parts, world_size, node_data, constants.NTYPE_ID
    )
    logging.info(f"[Rank: {rank}] Sorted node_data by node_type")
    memory_snapshot("NodeDataSortComplete: ", rank)

    # resolve global_ids for nodes
    # Synchronize before assigning shuffle-global-ids to nodes
    dist.barrier()
    assign_shuffle_global_nids_nodes(
        rank, world_size, params.num_parts, node_data
    )
    logging.info(f"[Rank: {rank}] Done assigning global-ids to nodes...")
    memory_snapshot("ShuffleGlobalID_Nodes_Complete: ", rank)

    # Shuffle node features according to the node order on each rank.
    for ntype_name in ntypes:
        featnames = get_ntype_featnames(ntype_name, schema_map)
        for featname in featnames:
            # if a feature name exists for a node-type, then it should also have
            # feature data as well. Hence using the assert statement.
            for local_part_id in range(params.num_parts // world_size):
                feature_key = (
                    ntype_name + "/" + featname + "/" + str(local_part_id)
                )
                assert feature_key in rcvd_global_nids
                global_nids = rcvd_global_nids[feature_key]

                _, idx1, _ = np.intersect1d(
                    node_data[constants.GLOBAL_NID + "/" + str(local_part_id)],
                    global_nids,
                    return_indices=True,
                )
                shuffle_global_ids = node_data[
                    constants.SHUFFLE_GLOBAL_NID + "/" + str(local_part_id)
                ][idx1]
                feature_idx = shuffle_global_ids.argsort()

                rcvd_node_features[feature_key] = rcvd_node_features[
                    feature_key
                ][feature_idx]
    memory_snapshot("ReorderNodeFeaturesComplete: ", rank)

    # Sort edge_data by etype
    edge_data = reorder_data(
        params.num_parts, world_size, edge_data, constants.ETYPE_ID
    )
    logging.info(f"[Rank: {rank}] Sorted edge_data by edge_type")
    memory_snapshot("EdgeDataSortComplete: ", rank)

    # Synchronize before assigning shuffle-global-nids for edges end points.
    dist.barrier()
    shuffle_global_eid_offsets = assign_shuffle_global_nids_edges(
        rank, world_size, params.num_parts, edge_data
    )
    logging.info(f"[Rank: {rank}] Done assigning global_ids to edges ...")

    memory_snapshot("ShuffleGlobalID_Edges_Complete: ", rank)

    # Shuffle edge features according to the edge order on each rank.
    for etype_name in etypes:
        featnames = get_etype_featnames(etype_name, schema_map)
        for featname in featnames:
            for local_part_id in range(params.num_parts // world_size):
                feature_key = (
                    etype_name + "/" + featname + "/" + str(local_part_id)
                )
                assert feature_key in rcvd_global_eids
                global_eids = rcvd_global_eids[feature_key]

                _, idx1, _ = np.intersect1d(
                    edge_data[constants.GLOBAL_EID + "/" + str(local_part_id)],
                    global_eids,
                    return_indices=True,
                )
                shuffle_global_ids = edge_data[
                    constants.SHUFFLE_GLOBAL_EID + "/" + str(local_part_id)
                ][idx1]
                feature_idx = shuffle_global_ids.argsort()

                rcvd_edge_features[feature_key] = rcvd_edge_features[
                    feature_key
                ][feature_idx]

    # determine global-ids for edge end-points
    # Synchronize before retrieving shuffle-global-nids for edges end points.
    dist.barrier()
    edge_data = lookup_shuffle_global_nids_edges(
        rank, world_size, params.num_parts, edge_data, id_lookup, node_data
    )
    logging.info(
        f"[Rank: {rank}] Done resolving orig_node_id for local node_ids..."
    )
    memory_snapshot("ShuffleGlobalID_Lookup_Complete: ", rank)

    def prepare_local_data(src_data, local_part_id):
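        # Keep only the entries of `src_data` that belong to `local_part_id`
        # and strip the "/<local_part_id>" suffix from their keys.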
        local_data = {}
        for k, v in src_data.items():
            tokens = k.split("/")
            if tokens[-1] == str(local_part_id):
                local_data["/".join(tokens[:-1])] = v
        return local_data

    # create dgl objects here
    output_meta_json = {}
    start = timer()

    graph_formats = None
    if params.graph_formats:
        graph_formats = params.graph_formats.split(",")

    for local_part_id in range(params.num_parts // world_size):
        # Synchronize for each local partition of the graph object.
        dist.barrier()

        num_edges = shuffle_global_eid_offsets[local_part_id]
        node_count = len(
            node_data[constants.NTYPE_ID + "/" + str(local_part_id)]
        )
        edge_count = len(
            edge_data[constants.ETYPE_ID + "/" + str(local_part_id)]
        )
        local_node_data = prepare_local_data(node_data, local_part_id)
        local_edge_data = prepare_local_data(edge_data, local_part_id)
        (
            graph_obj,
            ntypes_map_val,
            etypes_map_val,
            ntypes_map,
            etypes_map,
            orig_nids,
            orig_eids,
        ) = create_dgl_object(
            schema_map,
            rank + local_part_id * world_size,
            local_node_data,
            local_edge_data,
            num_edges,
            params.save_orig_nids,
            params.save_orig_eids,
        )
        sort_etypes = len(etypes_map) > 1
        local_node_features = prepare_local_data(
            rcvd_node_features, local_part_id
        )
        local_edge_features = prepare_local_data(
            rcvd_edge_features, local_part_id
        )
        write_dgl_objects(
            graph_obj,
            local_node_features,
            local_edge_features,
            params.output,
            rank + (local_part_id * world_size),
            orig_nids,
            orig_eids,
            graph_formats,
            sort_etypes,
        )
        memory_snapshot("DiskWriteDGLObjectsComplete: ", rank)

        # get the meta-data
        json_metadata = create_metadata_json(
            params.graph_name,
            node_count,
            edge_count,
            local_part_id * world_size + rank,
            params.num_parts,
            ntypes_map_val,
            etypes_map_val,
            ntypes_map,
            etypes_map,
            params.output,
        )
        output_meta_json[
            "local-part-id-" + str(local_part_id * world_size + rank)
        ] = json_metadata
        memory_snapshot("MetadataCreateComplete: ", rank)

    if rank == 0:
        # get meta-data from all partitions and merge them on rank-0
        metadata_list = gather_metadata_json(output_meta_json, rank, world_size)
        metadata_list[0] = output_meta_json
        write_metadata_json(
            metadata_list,
            params.output,
            params.graph_name,
            world_size,
            params.num_parts,
        )
    else:
        # send meta-data to Rank-0 process
        gather_metadata_json(output_meta_json, rank, world_size)
    end = timer()
    logging.info(
        f"[Rank: {rank}] Time to create dgl objects: {timedelta(seconds = end - start)}"
    )
    memory_snapshot("MetadataWriteComplete: ", rank)

    global_end = timer()
    logging.info(
        f"[Rank: {rank}] Total execution time of the program: {timedelta(seconds = global_end - global_start)}"
    )
    memory_snapshot("PipelineComplete: ", rank)


def single_machine_run(params):
    """Main function for distributed implementation on a single machine

    Parameters:
    -----------
    params : argparser object
        Argument Parser structure with pre-determined arguments as defined
        at the bottom of this file.
    """
    log_params(params)
    processes = []
    mp.set_start_method("spawn")

    # Invoke the `target` function from each of the spawned processes for the
    # distributed implementation
    for rank in range(params.world_size):
        p = mp.Process(
            target=run,
            args=(rank, params.world_size, gen_dist_partitions, params),
        )
        p.start()
        processes.append(p)

    for p in processes:
        p.join()


def run(rank, world_size, func_exec, params, backend="gloo"):
    """
    Init. function which is run by each process in the Gloo ProcessGroup

    Parameters:
    -----------
    rank : integer
        rank of the process
    world_size : integer
        number of processes configured in the Process Group
    func_exec : function name
        function to be invoked, containing the logic executed by each process in the group
    params : argparser object
        argument parser object to access the command line arguments
    backend : string
        string specifying the type of backend to use for communication
    """
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = "29500"

    # create Gloo Process Group
    dist.init_process_group(
        backend,
        rank=rank,
        world_size=world_size,
        timeout=timedelta(seconds=5 * 60),
    )

    # Invoke the main function to kick-off each process
    func_exec(rank, world_size, params)


def multi_machine_run(params):
    """
    Function to be invoked when executing data loading pipeline on multiple machines

    Parameters:
    -----------
    params : argparser object
        argparser object providing access to command line arguments.
    """
    rank = int(os.environ["RANK"])

    # init the gloo process group here.
    dist.init_process_group(
        backend="gloo",
        rank=rank,
        world_size=params.world_size,
        timeout=timedelta(seconds=params.process_group_timeout),
    )
    logging.info(f"[Rank: {rank}] Done with process group initialization...")

    # invoke the main function here.
    gen_dist_partitions(rank, params.world_size, params)
    logging.info(
        f"[Rank: {rank}] Done with the distributed data processing pipeline."
    )