Unverified Commit 8086d1ed authored by kylasa's avatar kylasa Committed by GitHub
Browse files

Adding launch script and wrapper script to trigger distributed graph … (#4276)



* Adding launch script and wrapper script to trigger distributed graph partitioning pipeline as defined in the UX document

1. dispatch_data.py is a wrapper script which builds the command and triggers the distributed partitioning pipeline
2. distgraphlaunch.py is the main Python script which triggers the pipeline; to simplify its usage, dispatch_data.py is included as a wrapper script around it.

* Added code to auto-detect python version and retrieve some parameters from the input metadata json file

1. Auto detect python version
2. Read the metadata json file and extract some parameters to pass to the user defined command which is used to trigger the pipeline.

* Updated the json file name to metadata.json file per UX documentation

1. Renamed json file name per UX documentation.

* address comments

* fix

* fix doc

* use unbuffered logging to cure anxiety

* cure more anxiety

* Update tools/dispatch_data.py
Co-authored-by: default avatarMinjie Wang <minjie.wang@nyu.edu>

* oops
Co-authored-by: default avatarQuan Gan <coin2028@hotmail.com>
Co-authored-by: default avatarMinjie Wang <minjie.wang@nyu.edu>
parent 067cd744
"""Launching distributed graph partitioning pipeline """
import os
import sys
import argparse
import logging
import json
INSTALL_DIR = os.path.abspath(os.path.join(__file__, '..'))
LAUNCH_SCRIPT = "distgraphlaunch.py"
PIPELINE_SCRIPT = "distpartitioning/data_proc_pipeline.py"
UDF_WORLD_SIZE = "world-size"
UDF_PART_DIR = "partitions-dir"
UDF_INPUT_DIR = "input-dir"
UDF_GRAPH_NAME = "graph-name"
UDF_SCHEMA = "schema"
UDF_NUM_PARTS = "num-parts"
UDF_OUT_DIR = "output"
LARG_PROCS_MACHINE = "num_proc_per_machine"
LARG_IPCONF = "ip_config"
LARG_MASTER_PORT = "master_port"
def get_launch_cmd(args) -> str:
    """Build the command line that invokes the generic launch script.

    Args:
        args: Parsed arguments carrying ``ip_config`` and ``master_port``.

    Returns:
        The launcher command string (with trailing space) to which the quoted
        user-defined pipeline command is later appended.
    """
    launcher_flags = (
        (LARG_PROCS_MACHINE, "1"),
        (LARG_IPCONF, args.ip_config),
        (LARG_MASTER_PORT, args.master_port),
    )
    cmd = "{} {}".format(sys.executable, os.path.join(INSTALL_DIR, LAUNCH_SCRIPT))
    for flag, value in launcher_flags:
        cmd = f"{cmd} --{flag} {value} "
    return cmd
def submit_jobs(args) -> str:
    """Build the pipeline command from CLI args plus metadata and launch it.

    Reads ``metadata.json`` from the input directory to recover the graph name
    and the number of partitions, assembles the user-defined pipeline command,
    and hands it (quoted) to the generic launch script.

    Args:
        args: Parsed command-line arguments (see ``main``).

    Raises:
        RuntimeError: If the launch command exits with a non-zero status.
    """
    # Read the metadata json file and extract the remaining pipeline arguments.
    schema_path = os.path.join(args.in_dir, "metadata.json")
    with open(schema_path) as schema:
        schema_map = json.load(schema)
    # One chunk entry per partition in each per-node-type chunk list.
    num_parts = len(schema_map["num_nodes_per_chunk"][0])
    graph_name = schema_map["graph_name"]

    # Assemble the arguments of the user-defined pipeline command, reusing the
    # UDF_* constants so the argument names are defined in exactly one place.
    argslist = ""
    argslist += "--{} {} ".format(UDF_WORLD_SIZE, num_parts)
    argslist += "--{} {} ".format(UDF_PART_DIR, args.partitions_dir)
    argslist += "--{} {} ".format(UDF_INPUT_DIR, args.in_dir)
    argslist += "--{} {} ".format(UDF_GRAPH_NAME, graph_name)
    argslist += "--{} {} ".format(UDF_SCHEMA, schema_path)
    argslist += "--{} {} ".format(UDF_NUM_PARTS, num_parts)
    argslist += "--{} {} ".format(UDF_OUT_DIR, args.out_dir)

    # (BarclayII) Is it safe to assume all the workers have the Python executable at the same path?
    pipeline_cmd = os.path.join(INSTALL_DIR, PIPELINE_SCRIPT)
    udf_cmd = f"{args.python_path} {pipeline_cmd} {argslist}"
    launch_cmd = get_launch_cmd(args)
    launch_cmd += '"' + udf_cmd + '"'
    print(launch_cmd)
    # Surface launcher failures instead of silently ignoring the exit status.
    status = os.system(launch_cmd)
    if status != 0:
        raise RuntimeError(
            f"Launch command failed with status {status}: {launch_cmd}")
def main():
    """Parse command-line arguments, validate them, and launch the pipeline.

    Raises:
        ValueError: If any required path argument is missing or does not point
            to an existing file/directory.
    """
    parser = argparse.ArgumentParser(
        description='Dispatch edge index and data to partitions',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--in-dir', type=str,
                        help='Location of the input directory where the dataset is located')
    parser.add_argument('--partitions-dir', type=str,
                        help='Location of the partition-id mapping files which define node-ids and their respective partition-ids, relative to the input directory')
    parser.add_argument('--out-dir', type=str,
                        help='Location of the output directory where the graph partitions will be created by this pipeline')
    parser.add_argument('--ip-config', type=str,
                        help='File location of IP configuration for server processes')
    parser.add_argument('--master-port', type=int, default=12345,
                        help='port used by gloo group to create rendezvous point')
    parser.add_argument('--python-path', type=str, default=sys.executable,
                        help='Path to the Python executable on all workers')
    # Keep parse_known_args for backward compatibility: extra/unknown
    # arguments are tolerated (and ignored) rather than rejected.
    args, _ = parser.parse_known_args()

    # Validate with explicit exceptions instead of `assert`, so the checks
    # are not stripped when running under `python -O`.
    if args.in_dir is None or not os.path.isdir(args.in_dir):
        raise ValueError(f'--in-dir must be an existing directory: {args.in_dir}')
    if args.partitions_dir is None or not os.path.isdir(args.partitions_dir):
        raise ValueError(f'--partitions-dir must be an existing directory: {args.partitions_dir}')
    if args.ip_config is None or not os.path.isfile(args.ip_config):
        raise ValueError(f'--ip-config must be an existing file: {args.ip_config}')

    submit_jobs(args)
if __name__ == '__main__':
    # Configure timestamped INFO-level logging before running the dispatcher.
    fmt = '%(asctime)s %(levelname)s %(message)s'
    logging.basicConfig(format=fmt, level=logging.INFO)
    main()
"""Launching tool for DGL distributed training"""
import os
import stat
import sys
import subprocess
import argparse
import signal
import logging
import time
import json
import multiprocessing
import re
from functools import partial
from threading import Thread
from typing import Optional
DEFAULT_PORT = 30050
def cleanup_proc(get_all_remote_pids, conn):
'''This process tries to clean up the remote training tasks.
'''
print('cleanupu process runs')
# This process should not handle SIGINT.
signal.signal(signal.SIGINT, signal.SIG_IGN)
data = conn.recv()
# If the launch process exits normally, this process doesn't need to do anything.
if data == 'exit':
sys.exit(0)
else:
remote_pids = get_all_remote_pids()
# Otherwise, we need to ssh to each machine and kill the training jobs.
for (ip, port), pids in remote_pids.items():
kill_process(ip, port, pids)
print('cleanup process exits')
def kill_process(ip, port, pids):
    '''ssh to a remote machine and kill the specified processes.
    '''
    curr_pid = os.getpid()
    # If we kill child processes first, the parent process may create more again. This happens
    # to Python's process pool. After sorting, we always kill parent processes first.
    pids.sort()
    killed_pids = []
    for pid in pids:
        assert curr_pid != pid
        print('kill process {} on {}:{}'.format(pid, ip, port), flush=True)
        kill_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'kill {}\''.format(pid)
        subprocess.run(kill_cmd, shell=True)
        killed_pids.append(pid)
    # It's possible that some of the processes are not killed. Let's try again,
    # escalating to SIGKILL, up to three times.
    for _ in range(3):
        killed_pids = get_killed_pids(ip, port, killed_pids)
        if not killed_pids:
            break
        killed_pids.sort()
        for survivor in killed_pids:
            print('kill process {} on {}:{}'.format(survivor, ip, port), flush=True)
            force_kill_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'kill -9 {}\''.format(survivor)
            subprocess.run(force_kill_cmd, shell=True)
def get_killed_pids(ip, port, killed_pids):
    '''Get the process IDs that we want to kill but are still alive.
    '''
    pid_csv = ','.join(str(pid) for pid in killed_pids)
    ps_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'ps -p {} -h\''.format(pid_csv)
    res = subprocess.run(ps_cmd, shell=True, stdout=subprocess.PIPE)
    # The first column of every non-empty `ps` output line is a pid that is still alive.
    rows = (line.split() for line in res.stdout.decode('utf-8').split('\n'))
    return [int(columns[0]) for columns in rows if columns]
def execute_remote(
        cmd: str,
        ip: str,
        port: int,
        username: Optional[str] = ""
) -> Thread:
    """Execute command line on remote machine via ssh.

    Args:
        cmd: User-defined command (udf) to execute on the remote host.
        ip: The ip-address of the host to run the command on.
        port: Port number that the host is listening on.
        username: Optional. If given, this will specify a username to use when issuing commands over SSH.
            Useful when your infra requires you to explicitly specify a username to avoid permission issues.

    Returns:
        thread: The daemon Thread whose run() executes `cmd` on the remote host via ssh.
            Its run() returns when the cmd completes on the remote host.
    """
    ip_prefix = ""
    if username:
        ip_prefix += "{username}@".format(username=username)

    # Construct ssh command that executes `cmd` on the remote host
    ssh_cmd = "ssh -o StrictHostKeyChecking=no -p {port} {ip_prefix}{ip} '{cmd}'".format(
        port=str(port),
        ip_prefix=ip_prefix,
        ip=ip,
        cmd=cmd,
    )

    # thread func to run the job
    def run(ssh_cmd):
        subprocess.check_call(ssh_cmd, shell=True)

    thread = Thread(target=run, args=(ssh_cmd,))
    # `Thread.setDaemon` is deprecated (since Python 3.10); set the attribute directly.
    thread.daemon = True
    thread.start()
    return thread
def get_remote_pids(ip, port, cmd_regex):
    """Get the process IDs that run the command in the remote machine.
    """
    curr_pid = os.getpid()
    # Here we want to get the python processes. We may get some ssh processes, so we should filter them out.
    ps_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'ps -aux | grep python | grep -v StrictHostKeyChecking\''
    listing = subprocess.run(ps_cmd, shell=True, stdout=subprocess.PIPE)
    matched_pids = []
    for line in listing.stdout.decode('utf-8').split('\n'):
        fields = line.split()
        if len(fields) < 2:
            continue
        # We only keep the processes that run the specified command (and not ourselves).
        if re.search(cmd_regex, line) is not None and int(fields[1]) != curr_pid:
            matched_pids.append(fields[1])
    # Also collect the direct children of the matched processes via pgrep.
    pid_str = ','.join([str(pid) for pid in matched_pids])
    ps_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'pgrep -P {}\''.format(pid_str)
    child_listing = subprocess.run(ps_cmd, shell=True, stdout=subprocess.PIPE)
    child_pids = child_listing.stdout.decode('utf-8').split('\n')
    all_pids = []
    for pid in set(matched_pids + child_pids):
        if pid == '' or int(pid) == curr_pid:
            continue
        all_pids.append(int(pid))
    all_pids.sort()
    return all_pids
def get_all_remote_pids(hosts, ssh_port, udf_command):
    '''Get all remote processes.
    '''
    # When creating training processes in remote machines, we may insert some arguments
    # in the commands. We need to use regular expressions to match the modified command.
    # The regex is the same for every host, so build it once up front.
    cmd_regex = ' .*'.join(udf_command.split())
    remote_pids = {}
    for ip, _ in hosts:
        remote_pids[(ip, ssh_port)] = get_remote_pids(ip, ssh_port, cmd_regex)
    return remote_pids
def construct_torch_dist_launcher_cmd(
        num_trainers: int,
        num_nodes: int,
        node_rank: int,
        master_addr: str,
        master_port: int
) -> str:
    """Constructs the torch distributed launcher command.

    Helper function.

    Args:
        num_trainers: Number of trainer processes per node.
        num_nodes: Number of participating nodes.
        node_rank: Rank of this node.
        master_addr: Address of the rank-0 (master) node.
        master_port: Port the master node listens on.

    Returns:
        The `-m torch.distributed.launch ...` argument string.
    """
    return (
        "-m torch.distributed.launch"
        f" --nproc_per_node={num_trainers}"
        f" --nnodes={num_nodes}"
        f" --node_rank={node_rank}"
        f" --master_addr={master_addr}"
        f" --master_port={master_port}"
    )
def wrap_udf_in_torch_dist_launcher(
        udf_command: str,
        num_trainers: int,
        num_nodes: int,
        node_rank: int,
        master_addr: str,
        master_port: int,
) -> str:
    """Wraps the user-defined function (udf_command) with the torch.distributed.launch module.

    Example: if udf_command is "python3 run/some/trainer.py arg1 arg2", then new_df_command becomes:
        "python3 -m torch.distributed.launch <TORCH DIST ARGS> run/some/trainer.py arg1 arg2"

    udf_command is assumed to consist of pre-commands (optional) followed by the python launcher
    script (required):
    Examples:
        # simple
        python3.7 path/to/some/trainer.py arg1 arg2
        # multi-commands
        (cd some/dir && python3.7 path/to/some/trainer.py arg1 arg2)

    IMPORTANT: If udf_command consists of multiple python commands, then this will result in
    undefined behavior (only the first occurrence of the detected binary is rewritten).

    Args:
        udf_command: The user-defined command to wrap.
        num_trainers: Number of trainer processes per node.
        num_nodes: Number of participating nodes.
        node_rank: Rank of this node.
        master_addr: Address of the rank-0 (master) node.
        master_port: Port the master node listens on.

    Returns:
        udf_command with its python binary invocation routed through torch.distributed.launch.
    """
    torch_dist_cmd = construct_torch_dist_launcher_cmd(
        num_trainers=num_trainers,
        num_nodes=num_nodes,
        node_rank=node_rank,
        master_addr=master_addr,
        master_port=master_port
    )
    # Auto-detect the python binary that kicks off the distributed trainer code.
    # Note: This allowlist order matters, this will match with the FIRST matching entry. Thus, please add names to this
    # from most-specific to least-specific order eg:
    #   (python3.7, python3.8) -> (python3)
    # The allowed python versions are from this: https://www.dgl.ai/pages/start.html
    python_bin_allowlist = (
        "python3.6", "python3.7", "python3.8", "python3.9", "python3",
        # for backwards compatibility, accept python2 but technically DGL is a py3 library, so this is not recommended
        "python2.7", "python2",
    )
    # If none of the candidate python bins match, then we go with the default `python`
    python_bin = "python"
    for candidate_python_bin in python_bin_allowlist:
        if candidate_python_bin in udf_command:
            python_bin = candidate_python_bin
            break
    # transforms the udf_command from:
    #     python path/to/dist_trainer.py arg0 arg1
    # to:
    #     python -m torch.distributed.launch [DIST TORCH ARGS] path/to/dist_trainer.py arg0 arg1
    # Bug fix: replace only the FIRST occurrence. An unbounded str.replace would also rewrite
    # later occurrences of the binary name (e.g. a script path containing "python3"),
    # corrupting the command.
    new_udf_command = udf_command.replace(python_bin, f"{python_bin} {torch_dist_cmd}", 1)
    return new_udf_command
def construct_dgl_server_env_vars(
        ip_config: str,
        num_proc_per_machine: int,
        pythonpath: Optional[str] = "",
) -> str:
    """Constructs the DGL server-specific env vars string that are required for DGL code to behave in the correct
    server role.

    Convenience function.

    Args:
        ip_config: IP config file containing IP addresses of cluster hosts.
            Relative path to workspace.
        num_proc_per_machine: Number of server processes per machine.
        pythonpath: Optional. If given, this will pass this as PYTHONPATH.

    Returns:
        server_env_vars: The server-specific env-vars in a string format, friendly for CLI execution.
    """
    env_parts = [
        f"DGL_IP_CONFIG={ip_config} ",
        f"DGL_NUM_SERVER={num_proc_per_machine} ",
    ]
    # PYTHONPATH is only appended when explicitly provided.
    if pythonpath:
        env_parts.append(f"PYTHONPATH={pythonpath} ")
    return "".join(env_parts)
def wrap_cmd_with_local_envvars(cmd: str, env_vars: str) -> str:
    """Wraps a CLI command with desired env vars with the following properties:
    (1) env vars persist for the entire `cmd`, even if it consists of multiple "chained" commands like:
        cmd = "ls && pwd && python run/something.py"
    (2) env vars don't pollute the environment after `cmd` completes.

    Example:
        >>> wrap_cmd_with_local_envvars("ls && pwd", "VAR1=value1 VAR2=value2")
        '(export VAR1=value1 VAR2=value2; ls && pwd)'

    Args:
        cmd: The shell command to wrap.
        env_vars: A string containing env vars, eg "VAR1=val1 VAR2=val2"

    Returns:
        cmd_with_env_vars: `cmd` prefixed by an `export` of `env_vars`, wrapped in a subshell.
    """
    # `export` makes the vars visible to every command in a chained `cmd`;
    # the surrounding parens run it in a subshell so the caller's environment
    # is untouched: https://stackoverflow.com/a/45993803
    return "(export {}; {})".format(env_vars, cmd)
def wrap_cmd_with_extra_envvars(cmd: str, env_vars: list) -> str:
    """Wraps a CLI command with extra env vars

    Example:
        >>> wrap_cmd_with_extra_envvars("ls && pwd", ["VAR1=value1", "VAR2=value2"])
        '(export VAR1=value1 VAR2=value2; ls && pwd)'

    Args:
        cmd: The shell command to wrap.
        env_vars: A list of strings containing env vars, e.g., ["VAR1=value1", "VAR2=value2"]

    Returns:
        cmd_with_env_vars: `cmd` with the given env vars exported in a subshell.
    """
    joined = " ".join(env_vars)
    # Same subshell-export wrapping used for the server env vars.
    return f"(export {joined}; {cmd})"
def submit_jobs(args, udf_command):
"""Submit distributed jobs (server and client processes) via ssh"""
hosts = []
thread_list = []
server_count_per_machine = 0
# Get the IP addresses of the cluster.
#ip_config = os.path.join(args.workspace, args.ip_config)
ip_config = args.ip_config
with open(ip_config) as f:
for line in f:
result = line.strip().split()
if len(result) == 2:
ip = result[0]
port = int(result[1])
hosts.append((ip, port))
elif len(result) == 1:
ip = result[0]
port = DEFAULT_PORT
hosts.append((ip, port))
else:
raise RuntimeError("Format error of ip_config.")
server_count_per_machine = args.num_proc_per_machine
# launch server tasks
server_env_vars = construct_dgl_server_env_vars(
ip_config=args.ip_config,
num_proc_per_machine=args.num_proc_per_machine,
pythonpath=os.environ.get("PYTHONPATH", ""),
)
for i in range(len(hosts) * server_count_per_machine):
ip, _ = hosts[int(i / server_count_per_machine)]
server_env_vars_cur = f"{server_env_vars} RANK={i} MASTER_ADDR={hosts[0][0]} MASTER_PORT={args.master_port}"
cmd = wrap_cmd_with_local_envvars(udf_command, server_env_vars_cur)
print(cmd)
thread_list.append(execute_remote(cmd, ip, args.ssh_port, username=args.ssh_username))
# Start a cleanup process dedicated for cleaning up remote training jobs.
conn1,conn2 = multiprocessing.Pipe()
func = partial(get_all_remote_pids, hosts, args.ssh_port, udf_command)
process = multiprocessing.Process(target=cleanup_proc, args=(func, conn1))
process.start()
def signal_handler(signal, frame):
logging.info('Stop launcher')
# We need to tell the cleanup process to kill remote training jobs.
conn2.send('cleanup')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
for thread in thread_list:
thread.join()
# The training processes complete. We should tell the cleanup process to exit.
conn2.send('exit')
process.join()
def main():
    """Entry point: parse the launcher arguments, validate them and submit jobs."""
    arg_parser = argparse.ArgumentParser(description='Launch a distributed job')
    arg_parser.add_argument('--ssh_port', type=int, default=22, help='SSH Port.')
    arg_parser.add_argument(
        "--ssh_username", default="",
        help="Optional. When issuing commands (via ssh) to cluster, use the provided username in the ssh cmd. "
        "Example: If you provide --ssh_username=bob, then the ssh command will be like: 'ssh bob@1.2.3.4 CMD' "
        "instead of 'ssh 1.2.3.4 CMD'"
    )
    arg_parser.add_argument('--num_proc_per_machine', type=int,
                            help='The number of server processes per machine')
    arg_parser.add_argument('--master_port', type=int,
                            help='This port is used to form gloo group (randevouz server)')
    arg_parser.add_argument('--ip_config', type=str,
                            help='The file (in workspace) of IP configuration for server processes')
    # Everything after the known flags is the user-defined command.
    args, udf_command = arg_parser.parse_known_args()

    # Validate before launching anything remotely.
    assert len(udf_command) == 1, 'Please provide user command line.'
    assert args.num_proc_per_machine is not None and args.num_proc_per_machine > 0, \
        '--num_proc_per_machine must be a positive number.'
    assert args.ip_config is not None, \
        'A user has to specify an IP configuration file with --ip_config.'

    udf_command = str(udf_command[0])
    if 'python' not in udf_command:
        raise RuntimeError("DGL launching script can only support Python executable file.")
    submit_jobs(args, udf_command)
if __name__ == '__main__':
    # Configure timestamped INFO-level logging before launching any jobs.
    fmt = '%(asctime)s %(levelname)s %(message)s'
    logging.basicConfig(format=fmt, level=logging.INFO)
    main()
...@@ -19,7 +19,7 @@ OWNER_PROCESS = "owner_proc_id" ...@@ -19,7 +19,7 @@ OWNER_PROCESS = "owner_proc_id"
PART_LOCAL_NID = "part_local_nid" PART_LOCAL_NID = "part_local_nid"
GLOO_MESSAGING_TIMEOUT = 60*60 #seconds GLOO_MESSAGING_TIMEOUT = 60 #seconds
STR_NODE_TYPE = "node_type" STR_NODE_TYPE = "node_type"
STR_NUM_NODES_PER_CHUNK = "num_nodes_per_chunk" STR_NUM_NODES_PER_CHUNK = "num_nodes_per_chunk"
......
import argparse import argparse
import numpy as np import numpy as np
import torch.multiprocessing as mp import torch.multiprocessing as mp
import logging
import platform
import os
from data_shuffle import single_machine_run, multi_machine_run from data_shuffle import single_machine_run, multi_machine_run
def log_params(params): def log_params(params):
...@@ -45,4 +48,5 @@ if __name__ == "__main__": ...@@ -45,4 +48,5 @@ if __name__ == "__main__":
params = parser.parse_args() params = parser.parse_args()
#invoke the pipeline function #invoke the pipeline function
logging.basicConfig(level='INFO', format=f"[{platform.node()} %(levelname)s %(asctime)s PID:%(process)d] %(message)s")
multi_machine_run(params) multi_machine_run(params)
...@@ -7,6 +7,7 @@ import torch ...@@ -7,6 +7,7 @@ import torch
import torch.distributed as dist import torch.distributed as dist
import torch.multiprocessing as mp import torch.multiprocessing as mp
import dgl import dgl
import logging
from timeit import default_timer as timer from timeit import default_timer as timer
from datetime import timedelta from datetime import timedelta
...@@ -158,7 +159,7 @@ def exchange_edge_data(rank, world_size, edge_data): ...@@ -158,7 +159,7 @@ def exchange_edge_data(rank, world_size, edge_data):
dist.barrier () dist.barrier ()
output_list = alltoallv_cpu(rank, world_size, input_list) output_list = alltoallv_cpu(rank, world_size, input_list)
end = timer() end = timer()
print('[Rank: ', rank, '] Time to send/rcv edge data: ', timedelta(seconds=end-start)) logging.info(f'[Rank: {rank}] Time to send/rcv edge data: {timedelta(seconds=end-start)}')
#Replace the values of the edge_data, with the received data from all the other processes. #Replace the values of the edge_data, with the received data from all the other processes.
rcvd_edge_data = torch.cat(output_list).numpy() rcvd_edge_data = torch.cat(output_list).numpy()
...@@ -237,7 +238,7 @@ def exchange_node_features(rank, world_size, node_feature_tids, ntype_gnid_map, ...@@ -237,7 +238,7 @@ def exchange_node_features(rank, world_size, node_feature_tids, ntype_gnid_map,
global_nid_per_rank = [] global_nid_per_rank = []
feat_name = feat_info[0] feat_name = feat_info[0]
feat_key = ntype_name+'/'+feat_name feat_key = ntype_name+'/'+feat_name
print('[Rank: ', rank, '] processing node feature: ', feat_key) logging.info(f'[Rank: {rank}] processing node feature: {feat_key}')
#compute the global_nid range for this node features #compute the global_nid range for this node features
type_nid_start = int(feat_info[1]) type_nid_start = int(feat_info[1])
...@@ -281,7 +282,7 @@ def exchange_node_features(rank, world_size, node_feature_tids, ntype_gnid_map, ...@@ -281,7 +282,7 @@ def exchange_node_features(rank, world_size, node_feature_tids, ntype_gnid_map,
own_global_nids[feat_key] = torch.cat(output_nid_list).numpy() own_global_nids[feat_key] = torch.cat(output_nid_list).numpy()
end = timer() end = timer()
print('[Rank: ', rank, '] Total time for node feature exchange: ', timedelta(seconds = end - start)) logging.info(f'[Rank: {rank}] Total time for node feature exchange: {timedelta(seconds = end - start)}')
return own_node_features, own_global_nids return own_node_features, own_global_nids
def exchange_graph_data(rank, world_size, node_features, node_feat_tids, edge_data, def exchange_graph_data(rank, world_size, node_features, node_feat_tids, edge_data,
...@@ -334,7 +335,7 @@ def exchange_graph_data(rank, world_size, node_features, node_feat_tids, edge_da ...@@ -334,7 +335,7 @@ def exchange_graph_data(rank, world_size, node_features, node_feat_tids, edge_da
""" """
rcvd_node_features, rcvd_global_nids = exchange_node_features(rank, world_size, node_feat_tids, \ rcvd_node_features, rcvd_global_nids = exchange_node_features(rank, world_size, node_feat_tids, \
ntypes_gnid_range_map, node_part_ids, node_features) ntypes_gnid_range_map, node_part_ids, node_features)
print( 'Rank: ', rank, ' Done with node features exchange.') logging.info(f'[Rank: {rank}] Done with node features exchange.')
node_data = gen_node_data(rank, world_size, node_part_ids, ntid_ntype_map, schema_map) node_data = gen_node_data(rank, world_size, node_part_ids, ntid_ntype_map, schema_map)
edge_data = exchange_edge_data(rank, world_size, edge_data) edge_data = exchange_edge_data(rank, world_size, edge_data)
...@@ -384,9 +385,10 @@ def read_dataset(rank, world_size, node_part_ids, params, schema_map): ...@@ -384,9 +385,10 @@ def read_dataset(rank, world_size, node_part_ids, params, schema_map):
#node_tids, node_features, edge_datadict, edge_tids #node_tids, node_features, edge_datadict, edge_tids
node_tids, node_features, node_feat_tids, edge_data, edge_tids = \ node_tids, node_features, node_feat_tids, edge_data, edge_tids = \
get_dataset(params.input_dir, params.graph_name, rank, world_size, schema_map) get_dataset(params.input_dir, params.graph_name, rank, world_size, schema_map)
logging.info(f'[Rank: {rank}] Done reading dataset deom {params.input_dir}')
augment_edge_data(edge_data, node_part_ids, edge_tids, rank, world_size) augment_edge_data(edge_data, node_part_ids, edge_tids, rank, world_size)
print('[Rank: ', rank, '] Done augmenting edge_data: ', len(edge_data), edge_data[constants.GLOBAL_SRC_ID].shape) logging.info(f'[Rank: {rank}] Done augmenting edge_data: {len(edge_data)}, {edge_data[constants.GLOBAL_SRC_ID].shape}')
return node_tids, node_features, node_feat_tids, edge_data, edge_features return node_tids, node_features, node_feat_tids, edge_data, edge_features
...@@ -515,7 +517,7 @@ def gen_dist_partitions(rank, world_size, params): ...@@ -515,7 +517,7 @@ def gen_dist_partitions(rank, world_size, params):
this object, key value pairs, provides access to the command line arguments from the runtime environment this object, key value pairs, provides access to the command line arguments from the runtime environment
""" """
global_start = timer() global_start = timer()
print('[Rank: ', rank, '] Starting distributed data processing pipeline...') logging.info(f'[Rank: {rank}] Starting distributed data processing pipeline...')
#init processing #init processing
schema_map = read_json(os.path.join(params.input_dir, params.schema)) schema_map = read_json(os.path.join(params.input_dir, params.schema))
...@@ -526,12 +528,12 @@ def gen_dist_partitions(rank, world_size, params): ...@@ -526,12 +528,12 @@ def gen_dist_partitions(rank, world_size, params):
node_part_ids = read_ntype_partition_files(schema_map, os.path.join(params.input_dir, params.partitions_dir)) node_part_ids = read_ntype_partition_files(schema_map, os.path.join(params.input_dir, params.partitions_dir))
ntypes_ntypeid_map, ntypes, ntypeid_ntypes_map = get_node_types(schema_map) ntypes_ntypeid_map, ntypes, ntypeid_ntypes_map = get_node_types(schema_map)
print('[Rank: ', rank, '] Initialized metis partitions and node_types map...') logging.info(f'[Rank: {rank}] Initialized metis partitions and node_types map...')
#read input graph files and augment these datastructures with #read input graph files and augment these datastructures with
#appropriate information (global_nid and owner process) for node and edge data #appropriate information (global_nid and owner process) for node and edge data
node_tids, node_features, node_feat_tids, edge_data, edge_features = read_dataset(rank, world_size, node_part_ids, params, schema_map) node_tids, node_features, node_feat_tids, edge_data, edge_features = read_dataset(rank, world_size, node_part_ids, params, schema_map)
print('[Rank: ', rank, '] Done augmenting file input data with auxilary columns') logging.info(f'[Rank: {rank}] Done augmenting file input data with auxilary columns')
#send out node and edge data --- and appropriate features. #send out node and edge data --- and appropriate features.
#this function will also stitch the data recvd from other processes #this function will also stitch the data recvd from other processes
...@@ -541,17 +543,17 @@ def gen_dist_partitions(rank, world_size, params): ...@@ -541,17 +543,17 @@ def gen_dist_partitions(rank, world_size, params):
exchange_graph_data(rank, world_size, node_features, node_feat_tids, \ exchange_graph_data(rank, world_size, node_features, node_feat_tids, \
edge_data, node_part_ids, ntypes_ntypeid_map, ntypes_gnid_range_map, \ edge_data, node_part_ids, ntypes_ntypeid_map, ntypes_gnid_range_map, \
ntypeid_ntypes_map, schema_map) ntypeid_ntypes_map, schema_map)
print('[Rank: ', rank, '] Done with data shuffling...') logging.info(f'[Rank: {rank}] Done with data shuffling...')
#sort node_data by ntype #sort node_data by ntype
idx = node_data[constants.NTYPE_ID].argsort() idx = node_data[constants.NTYPE_ID].argsort()
for k, v in node_data.items(): for k, v in node_data.items():
node_data[k] = v[idx] node_data[k] = v[idx]
print('[Rank: ', rank, '] Sorted node_data by node_type') logging.info(f'[Rank: {rank}] Sorted node_data by node_type')
#resolve global_ids for nodes #resolve global_ids for nodes
assign_shuffle_global_nids_nodes(rank, world_size, node_data) assign_shuffle_global_nids_nodes(rank, world_size, node_data)
print('[Rank: ', rank, '] Done assigning global-ids to nodes...') logging.info(f'[Rank: {rank}] Done assigning global-ids to nodes...')
#shuffle node feature according to the node order on each rank. #shuffle node feature according to the node order on each rank.
for ntype_name in ntypes: for ntype_name in ntypes:
...@@ -573,11 +575,11 @@ def gen_dist_partitions(rank, world_size, params): ...@@ -573,11 +575,11 @@ def gen_dist_partitions(rank, world_size, params):
edge_data[k] = v[sorted_idx] edge_data[k] = v[sorted_idx]
shuffle_global_eid_start = assign_shuffle_global_nids_edges(rank, world_size, edge_data) shuffle_global_eid_start = assign_shuffle_global_nids_edges(rank, world_size, edge_data)
print('[Rank: ', rank, '] Done assigning global_ids to edges ...') logging.info(f'[Rank: {rank}] Done assigning global_ids to edges ...')
#determine global-ids for edge end-points #determine global-ids for edge end-points
get_shuffle_global_nids_edges(rank, world_size, edge_data, node_part_ids, node_data) get_shuffle_global_nids_edges(rank, world_size, edge_data, node_part_ids, node_data)
print('[Rank: ', rank, '] Done resolving orig_node_id for local node_ids...') logging.info(f'[Rank: {rank}] Done resolving orig_node_id for local node_ids...')
#create dgl objects here #create dgl objects here
start = timer() start = timer()
...@@ -602,10 +604,10 @@ def gen_dist_partitions(rank, world_size, params): ...@@ -602,10 +604,10 @@ def gen_dist_partitions(rank, world_size, params):
#send meta-data to Rank-0 process #send meta-data to Rank-0 process
gather_metadata_json(json_metadata, rank, world_size) gather_metadata_json(json_metadata, rank, world_size)
end = timer() end = timer()
print('[Rank: ', rank, '] Time to create dgl objects: ', timedelta(seconds = end - start)) logging.info(f'[Rank: {rank}] Time to create dgl objects: {timedelta(seconds = end - start)}')
global_end = timer() global_end = timer()
print('[Rank: ', rank, '] Total execution time of the program: ', timedelta(seconds = global_end - global_start)) logging.info(f'[Rank: {rank}] Total execution time of the program: {timedelta(seconds = global_end - global_start)}')
def single_machine_run(params): def single_machine_run(params):
""" Main function for distributed implementation on a single machine """ Main function for distributed implementation on a single machine
...@@ -668,9 +670,13 @@ def multi_machine_run(params): ...@@ -668,9 +670,13 @@ def multi_machine_run(params):
rank = int(os.environ["RANK"]) rank = int(os.environ["RANK"])
#init the gloo process group here. #init the gloo process group here.
dist.init_process_group("gloo", rank=rank, world_size=params.world_size, timeout=timedelta(seconds=constants.GLOO_MESSAGING_TIMEOUT)) dist.init_process_group(
print('[Rank: ', rank, '] Done with process group initialization...') backend="gloo",
rank=rank,
world_size=params.world_size,
timeout=timedelta(seconds=constants.GLOO_MESSAGING_TIMEOUT))
logging.info(f'[Rank: {rank}] Done with process group initialization...')
#invoke the main function here. #invoke the main function here.
gen_dist_partitions(rank, params.world_size, params) gen_dist_partitions(rank, params.world_size, params)
print('[Rank: ', rank, '] Done with Distributed data processing pipeline processing.') logging.info(f'[Rank: {rank}] Done with Distributed data processing pipeline processing.')
...@@ -2,6 +2,7 @@ import os ...@@ -2,6 +2,7 @@ import os
import numpy as np import numpy as np
import constants import constants
import torch import torch
import logging
import pyarrow import pyarrow
from pyarrow import csv from pyarrow import csv
...@@ -111,11 +112,14 @@ def get_dataset(input_dir, graph_name, rank, world_size, schema_map): ...@@ -111,11 +112,14 @@ def get_dataset(input_dir, graph_name, rank, world_size, schema_map):
assert feat_data[constants.STR_FORMAT][constants.STR_NAME] == constants.STR_NUMPY assert feat_data[constants.STR_FORMAT][constants.STR_NAME] == constants.STR_NUMPY
my_feat_data_fname = feat_data[constants.STR_DATA][rank] #this will be just the file name my_feat_data_fname = feat_data[constants.STR_DATA][rank] #this will be just the file name
if (os.path.isabs(my_feat_data_fname)): if (os.path.isabs(my_feat_data_fname)):
logging.info(f'Loading numpy from {my_feat_data_fname}')
node_features[ntype_name+'/'+feat_name] = \ node_features[ntype_name+'/'+feat_name] = \
torch.from_numpy(np.load(my_feat_data_fname)) torch.from_numpy(np.load(my_feat_data_fname))
else: else:
numpy_path = os.path.join(input_dir, my_feat_data_fname)
logging.info(f'Loading numpy from {numpy_path}')
node_features[ntype_name+'/'+feat_name] = \ node_features[ntype_name+'/'+feat_name] = \
torch.from_numpy(np.load(os.path.join(input_dir, my_feat_data_fname))) torch.from_numpy(np.load(numpy_path))
node_feature_tids[ntype_name].append([feat_name, -1, -1]) node_feature_tids[ntype_name].append([feat_name, -1, -1])
...@@ -155,10 +159,10 @@ def get_dataset(input_dir, graph_name, rank, world_size, schema_map): ...@@ -155,10 +159,10 @@ def get_dataset(input_dir, graph_name, rank, world_size, schema_map):
#done build node_features locally. #done build node_features locally.
if len(node_features) <= 0: if len(node_features) <= 0:
print('[Rank: ', rank, '] This dataset does not have any node features') logging.info(f'[Rank: {rank}] This dataset does not have any node features')
else: else:
for k, v in node_features.items(): for k, v in node_features.items():
print('[Rank: ', rank, '] node feature name: ', k, ', feature data shape: ', v.size()) logging.info(f'[Rank: {rank}] node feature name: {k}, feature data shape: {v.size()}')
''' '''
Code below is used to read edges from the input dataset with the help of the metadata json file Code below is used to read edges from the input dataset with the help of the metadata json file
...@@ -230,6 +234,7 @@ def get_dataset(input_dir, graph_name, rank, world_size, schema_map): ...@@ -230,6 +234,7 @@ def get_dataset(input_dir, graph_name, rank, world_size, schema_map):
rel_name = tokens[1] rel_name = tokens[1]
dst_ntype_name = tokens[2] dst_ntype_name = tokens[2]
logging.info(f'Reading csv files from {edge_info[rank]}')
data_df = csv.read_csv(edge_info[rank], read_options=pyarrow.csv.ReadOptions(autogenerate_column_names=True), data_df = csv.read_csv(edge_info[rank], read_options=pyarrow.csv.ReadOptions(autogenerate_column_names=True),
parse_options=pyarrow.csv.ParseOptions(delimiter=' ')) parse_options=pyarrow.csv.ParseOptions(delimiter=' '))
#currently these are just type_edge_ids... which will be converted to global ids #currently these are just type_edge_ids... which will be converted to global ids
...@@ -247,7 +252,7 @@ def get_dataset(input_dir, graph_name, rank, world_size, schema_map): ...@@ -247,7 +252,7 @@ def get_dataset(input_dir, graph_name, rank, world_size, schema_map):
assert edge_datadict[constants.GLOBAL_SRC_ID].shape == edge_datadict[constants.GLOBAL_DST_ID].shape assert edge_datadict[constants.GLOBAL_SRC_ID].shape == edge_datadict[constants.GLOBAL_DST_ID].shape
assert edge_datadict[constants.GLOBAL_DST_ID].shape == edge_datadict[constants.GLOBAL_TYPE_EID].shape assert edge_datadict[constants.GLOBAL_DST_ID].shape == edge_datadict[constants.GLOBAL_TYPE_EID].shape
assert edge_datadict[constants.GLOBAL_TYPE_EID].shape == edge_datadict[constants.ETYPE_ID].shape assert edge_datadict[constants.GLOBAL_TYPE_EID].shape == edge_datadict[constants.ETYPE_ID].shape
print('[Rank: ', rank, '] Done reading edge_file: ', len(edge_datadict), edge_datadict[constants.GLOBAL_SRC_ID].shape) logging.info(f'[Rank: {rank}] Done reading edge_file: {len(edge_datadict)}, {edge_datadict[constants.GLOBAL_SRC_ID].shape}')
return node_tids, node_features, node_feature_tids, edge_datadict, edge_tids return node_tids, node_features, node_feature_tids, edge_datadict, edge_tids
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment