"""Launching tool for DGL distributed training"""
import argparse
import json
import logging
import multiprocessing
import os
import re
import signal
import stat
import subprocess
import sys
import time
from functools import partial
from threading import Thread
from typing import Optional

# Port used for hosts listed in the ip_config file without an explicit port.
DEFAULT_PORT = 30050
def cleanup_proc(get_all_remote_pids, conn):
    """Clean up the remote training tasks when the launcher is interrupted.

    Runs as a dedicated child process. It blocks on `conn` until the parent
    launcher sends a message: "exit" means the launch finished normally and
    there is nothing to do; any other message means ssh to every machine and
    kill the remote training jobs.

    Args:
        get_all_remote_pids: Zero-argument callable returning a dict mapping
            (ip, ssh_port) -> list of remote process IDs to kill.
        conn: Receiving end of a multiprocessing.Pipe connected to the
            launcher process.
    """
    print("cleanup process runs")
    # This process should not handle SIGINT; the parent launcher owns it and
    # tells us what to do through the pipe.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    data = conn.recv()
    # If the launch process exits normally, this process doesn't need to do anything.
    if data == "exit":
        sys.exit(0)
    else:
        # Otherwise, we need to ssh to each machine and kill the training jobs.
        remote_pids = get_all_remote_pids()
        for (ip, port), pids in remote_pids.items():
            kill_process(ip, port, pids)
    print("cleanup process exits")
def kill_process(ip, port, pids):
    """ssh to a remote machine and kill the specified processes."""
    my_pid = os.getpid()
    attempted = []
    # If we kill child processes first, the parent process may create more
    # again. This happens to Python's process pool. After sorting, we always
    # kill parent processes first (parents have smaller PIDs).
    pids.sort()
    for pid in pids:
        assert my_pid != pid
        print(f"kill process {pid} on {ip}:{port}", flush=True)
        subprocess.run(
            f"ssh -o StrictHostKeyChecking=no -p {port} {ip} 'kill {pid}'",
            shell=True,
        )
        attempted.append(pid)
    # It's possible that some of the processes are not killed. Let's try again.
    for _ in range(3):
        attempted = get_killed_pids(ip, port, attempted)
        if not attempted:
            break
        attempted.sort()
        for pid in attempted:
            print(f"kill process {pid} on {ip}:{port}", flush=True)
            # Escalate to SIGKILL for survivors.
            subprocess.run(
                f"ssh -o StrictHostKeyChecking=no -p {port} {ip} 'kill -9 {pid}'",
                shell=True,
            )
def get_killed_pids(ip, port, killed_pids):
    """Get the process IDs that we want to kill but are still alive."""
    pid_list = ",".join(str(pid) for pid in killed_pids)
    ps_cmd = (
        f"ssh -o StrictHostKeyChecking=no -p {port} {ip} "
        f"'ps -p {pid_list} -h'"
    )
    res = subprocess.run(ps_cmd, shell=True, stdout=subprocess.PIPE)
    # `ps -h` prints one line per still-alive process; column 0 is the PID.
    alive = []
    for line in res.stdout.decode("utf-8").split("\n"):
        fields = line.split()
        if fields:
            alive.append(int(fields[0]))
    return alive
def execute_remote(
    cmd: str, ip: str, port: int, username: Optional[str] = ""
) -> Thread:
    """Execute command line on remote machine via ssh.

    Args:
        cmd: User-defined command (udf) to execute on the remote host.
        ip: The ip-address of the host to run the command on.
        port: Port number that the host is listening on.
        username: Optional. If given, this will specify a username to use when issuing commands over SSH.
            Useful when your infra requires you to explicitly specify a username to avoid permission issues.

    Returns:
        thread: The Thread whose run() is to run the `cmd` on the remote host. Returns when the cmd completes
            on the remote host.
    """
    ip_prefix = ""
    if username:
        ip_prefix += "{username}@".format(username=username)

    # Construct ssh command that executes `cmd` on the remote host
    ssh_cmd = "ssh -o StrictHostKeyChecking=no -p {port} {ip_prefix}{ip} '{cmd}'".format(
        port=str(port),
        ip_prefix=ip_prefix,
        ip=ip,
        cmd=cmd,
    )

    # thread func to run the job
    def run(ssh_cmd):
        subprocess.check_call(ssh_cmd, shell=True)

    thread = Thread(target=run, args=(ssh_cmd,))
    # Thread.setDaemon() is deprecated since Python 3.10; assign the
    # attribute directly instead.
    thread.daemon = True
    thread.start()
    return thread
def get_remote_pids(ip, port, cmd_regex):
    """Get the process IDs that run the command in the remote machine."""
    pids = []
    curr_pid = os.getpid()
    # Here we want to get the python processes. We may get some ssh processes,
    # so we should filter them out.
    ps_cmd = (
        f"ssh -o StrictHostKeyChecking=no -p {port} {ip} "
        "'ps -aux | grep python | grep -v StrictHostKeyChecking'"
    )
    ps_out = subprocess.run(ps_cmd, shell=True, stdout=subprocess.PIPE)
    for line in ps_out.stdout.decode("utf-8").split("\n"):
        fields = line.split()
        if len(fields) < 2:
            continue
        # We only get the processes that run the specified command.
        if re.search(cmd_regex, line) is not None and int(fields[1]) != curr_pid:
            pids.append(fields[1])

    # Also collect direct children of the matched processes via pgrep.
    pid_str = ",".join(str(pid) for pid in pids)
    child_cmd = (
        f"ssh -o StrictHostKeyChecking=no -p {port} {ip} 'pgrep -P {pid_str}'"
    )
    child_out = subprocess.run(child_cmd, shell=True, stdout=subprocess.PIPE)
    child_pids = child_out.stdout.decode("utf-8").split("\n")
    all_pids = []
    for pid in set(pids + child_pids):
        if pid == "" or int(pid) == curr_pid:
            continue
        all_pids.append(int(pid))
    all_pids.sort()
    return all_pids
def get_all_remote_pids(hosts, ssh_port, udf_command):
    """Get all remote processes running the user command.

    Args:
        hosts: List of (ip, port) pairs for the cluster machines.
        ssh_port: Port to use when ssh-ing into each machine.
        udf_command: The user command line whose processes we look for.

    Returns:
        dict mapping (ip, ssh_port) -> list of matching remote process IDs.
    """
    remote_pids = {}
    for ip, _ in hosts:
        # When creating training processes in remote machines, we may insert
        # some arguments in the commands. Build a regex that matches the
        # original tokens with anything in between. Each token is escaped so
        # shell characters in the command (parentheses, dots, '&&', ...) are
        # matched literally instead of being treated as regex syntax.
        cmds = udf_command.split()
        new_udf_command = " .*".join(re.escape(cmd) for cmd in cmds)
        pids = get_remote_pids(ip, ssh_port, new_udf_command)
        remote_pids[(ip, ssh_port)] = pids
    return remote_pids
def construct_torch_dist_launcher_cmd(
    num_trainers: int,
    num_nodes: int,
    node_rank: int,
    master_addr: str,
    master_port: int,
) -> str:
    """Constructs the torch distributed launcher command.
    Helper function.

    Args:
        num_trainers: Trainer processes per node (--nproc_per_node).
        num_nodes: Total number of nodes (--nnodes).
        node_rank: Rank of this node (--node_rank).
        master_addr: Address of the rendezvous master (--master_addr).
        master_port: Port of the rendezvous master (--master_port).

    Returns:
        cmd_str: The `torch.distributed.launch` argument fragment, meant to
            be placed directly after the python binary.
    """
    return (
        f"-m torch.distributed.launch "
        f"--nproc_per_node={num_trainers} "
        f"--nnodes={num_nodes} "
        f"--node_rank={node_rank} "
        f"--master_addr={master_addr} "
        f"--master_port={master_port}"
    )
def wrap_udf_in_torch_dist_launcher(
    udf_command: str,
    num_trainers: int,
    num_nodes: int,
    node_rank: int,
    master_addr: str,
    master_port: int,
) -> str:
    """Wraps the user-defined function (udf_command) with the torch.distributed.launch module.

     Example: if udf_command is "python3 run/some/trainer.py arg1 arg2", then new_df_command becomes:
         "python3 -m torch.distributed.launch <TORCH DIST ARGS> run/some/trainer.py arg1 arg2

    udf_command is assumed to consist of pre-commands (optional) followed by the python launcher script (required):
    Examples:
        # simple
        python3.7 path/to/some/trainer.py arg1 arg2

        # multi-commands
        (cd some/dir && python3.7 path/to/some/trainer.py arg1 arg2)

    IMPORTANT: If udf_command consists of multiple python commands, then this will result in undefined behavior.

    Args:
        udf_command: The raw user command line.
        num_trainers: Trainer processes per node.
        num_nodes: Total number of nodes.
        node_rank: Rank of this node.
        master_addr: Rendezvous master address.
        master_port: Rendezvous master port.

    Returns:
        new_udf_command: `udf_command` with the torch launcher arguments
            spliced in directly after the python binary.
    """
    torch_dist_cmd = construct_torch_dist_launcher_cmd(
        num_trainers=num_trainers,
        num_nodes=num_nodes,
        node_rank=node_rank,
        master_addr=master_addr,
        master_port=master_port,
    )
    # Auto-detect the python binary that kicks off the distributed trainer code.
    # Note: This allowlist order matters, this will match with the FIRST matching entry. Thus, please add names to this
    #       from most-specific to least-specific order eg:
    #           (python3.10, python3.7) -> (python3)
    # Multi-digit minors (python3.10+) MUST precede "python3": otherwise
    # "python3" matches first and str.replace splices the launcher into the
    # middle of "python3.10", producing a broken command.
    # The allowed python versions are from this: https://www.dgl.ai/pages/start.html
    python_bin_allowlist = (
        "python3.12",
        "python3.11",
        "python3.10",
        "python3.6",
        "python3.7",
        "python3.8",
        "python3.9",
        "python3",
        # for backwards compatibility, accept python2 but technically DGL is a py3 library, so this is not recommended
        "python2.7",
        "python2",
    )
    # If none of the candidate python bins match, then we go with the default `python`
    python_bin = "python"
    for candidate_python_bin in python_bin_allowlist:
        if candidate_python_bin in udf_command:
            python_bin = candidate_python_bin
            break

    # transforms the udf_command from:
    #     python path/to/dist_trainer.py arg0 arg1
    # to:
    #     python -m torch.distributed.launch [DIST TORCH ARGS] path/to/dist_trainer.py arg0 arg1
    # Note: if there are multiple python commands in `udf_command`, this may do the Wrong Thing, eg launch each
    #       python command within the torch distributed launcher.
    new_udf_command = udf_command.replace(
        python_bin, f"{python_bin} {torch_dist_cmd}"
    )

    return new_udf_command
def construct_dgl_server_env_vars(
    ip_config: str,
    num_proc_per_machine: int,
    pythonpath: Optional[str] = "",
) -> str:
    """Build the env-var string that puts DGL code into the server role.
    Convenience function.

    Args:
        ip_config: IP config file containing IP addresses of cluster hosts.
            Relative path to workspace.
        num_proc_per_machine: Number of server processes per machine
            (exported as DGL_NUM_SERVER).
        pythonpath: Optional. If given, this will pass this as PYTHONPATH.

    Returns:
        server_env_vars: The server-specific env-vars in a string format,
            friendly for CLI execution.
    """
    optional_envvars = f"PYTHONPATH={pythonpath} " if pythonpath else ""
    return (
        f"DGL_IP_CONFIG={ip_config} "
        f"DGL_NUM_SERVER={num_proc_per_machine} "
        f"{optional_envvars}"
    )
def wrap_cmd_with_local_envvars(cmd: str, env_vars: str) -> str:
    """Prefix a CLI command with env vars that apply to the whole command.

    Properties:
    (1) env vars persist for the entire `cmd`, even if it consists of
        multiple "chained" commands like: "ls && pwd && python run/x.py";
    (2) env vars don't pollute the environment after `cmd` completes.

    Example:
        >>> cmd = "ls && pwd"
        >>> env_vars = "VAR1=value1 VAR2=value2"
        >>> wrap_cmd_with_local_envvars(cmd, env_vars)
        "(export VAR1=value1 VAR2=value2; ls && pwd)"

    Args:
        cmd: The command line to wrap.
        env_vars: A string containing env vars, eg "VAR1=val1 VAR2=val2".

    Returns:
        cmd_with_env_vars: The wrapped command.
    """
    # `export` keeps the vars alive across a chain of commands, and the
    # parentheses run everything in a subshell so the caller's environment
    # stays clean: https://stackoverflow.com/a/45993803
    return "(export {}; {})".format(env_vars, cmd)
def wrap_cmd_with_extra_envvars(cmd: str, env_vars: list) -> str:
    """Prefix a CLI command with a list of extra env vars.

    Example:
        >>> cmd = "ls && pwd"
        >>> env_vars = ["VAR1=value1", "VAR2=value2"]
        >>> wrap_cmd_with_extra_envvars(cmd, env_vars)
        "(export VAR1=value1 VAR2=value2; ls && pwd)"

    Args:
        cmd: The command line to wrap.
        env_vars: A list of strings containing env vars, e.g.,
            ["VAR1=value1", "VAR2=value2"].

    Returns:
        cmd_with_env_vars: The wrapped command.
    """
    # Same shape as wrap_cmd_with_local_envvars: export inside a subshell so
    # the vars cover chained commands without polluting the caller's env.
    return f"(export {' '.join(env_vars)}; {cmd})"
def submit_jobs(args, udf_command):
    """Submit distributed jobs (server and client processes) via ssh.

    Reads the cluster layout from `args.ip_config`, launches
    `args.num_proc_per_machine` copies of `udf_command` on each host over
    ssh, and starts a watchdog process that kills the remote jobs if this
    launcher is interrupted with SIGINT. Blocks until all remote commands
    complete.

    Args:
        args: Parsed launcher arguments (ssh_port, ssh_username,
            num_proc_per_machine, master_port, ip_config).
        udf_command: The user command line to run on every host.
    """
    hosts = []
    thread_list = []
    server_count_per_machine = 0

    # Get the IP addresses of the cluster.
    # ip_config = os.path.join(args.workspace, args.ip_config)
    ip_config = args.ip_config
    with open(ip_config) as f:
        for line in f:
            result = line.strip().split()
            if len(result) == 2:
                # Line format: "<ip> <port>".
                ip = result[0]
                port = int(result[1])
                hosts.append((ip, port))
            elif len(result) == 1:
                # Line format: "<ip>"; fall back to DEFAULT_PORT.
                ip = result[0]
                port = DEFAULT_PORT
                hosts.append((ip, port))
            else:
                raise RuntimeError("Format error of ip_config.")
            server_count_per_machine = args.num_proc_per_machine

    # launch server tasks
    server_env_vars = construct_dgl_server_env_vars(
        ip_config=args.ip_config,
        num_proc_per_machine=args.num_proc_per_machine,
        pythonpath=os.environ.get("PYTHONPATH", ""),
    )
    # One remote process per (host, local slot); RANK is the global index and
    # the first host in ip_config acts as the rendezvous master.
    for i in range(len(hosts) * server_count_per_machine):
        ip, _ = hosts[int(i / server_count_per_machine)]
        server_env_vars_cur = f"{server_env_vars} RANK={i} MASTER_ADDR={hosts[0][0]} MASTER_PORT={args.master_port}"
        cmd = wrap_cmd_with_local_envvars(udf_command, server_env_vars_cur)
        print(cmd)
        thread_list.append(
            execute_remote(cmd, ip, args.ssh_port, username=args.ssh_username)
        )

    # Start a cleanup process dedicated for cleaning up remote training jobs.
    conn1, conn2 = multiprocessing.Pipe()
    func = partial(get_all_remote_pids, hosts, args.ssh_port, udf_command)
    process = multiprocessing.Process(target=cleanup_proc, args=(func, conn1))
    process.start()

    # On Ctrl-C, ask the cleanup process to kill the remote jobs, then exit.
    def signal_handler(signal, frame):
        logging.info("Stop launcher")
        # We need to tell the cleanup process to kill remote training jobs.
        conn2.send("cleanup")
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)

    # Wait for every remote command to finish.
    for thread in thread_list:
        thread.join()
    # The training processes complete. We should tell the cleanup process to exit.
    conn2.send("exit")
    process.join()
def main():
    """Parse launcher CLI arguments, validate them, and submit the job."""
    parser = argparse.ArgumentParser(description="Launch a distributed job")
    parser.add_argument("--ssh_port", type=int, default=22, help="SSH Port.")
    parser.add_argument(
        "--ssh_username",
        default="",
        help="Optional. When issuing commands (via ssh) to cluster, use the provided username in the ssh cmd. "
        "Example: If you provide --ssh_username=bob, then the ssh command will be like: 'ssh bob@1.2.3.4 CMD' "
        "instead of 'ssh 1.2.3.4 CMD'",
    )
    parser.add_argument(
        "--num_proc_per_machine",
        type=int,
        help="The number of server processes per machine",
    )
    parser.add_argument(
        "--master_port",
        type=int,
        help="This port is used to form gloo group (rendezvous server)",
    )
    parser.add_argument(
        "--ip_config",
        type=str,
        help="The file (in workspace) of IP configuration for server processes",
    )

    # Anything not recognized above is treated as the user command line.
    args, udf_command = parser.parse_known_args()

    # Validate with explicit raises rather than `assert`, so the checks are
    # not stripped when running under `python -O`.
    if len(udf_command) != 1:
        raise RuntimeError("Please provide user command line.")
    if args.num_proc_per_machine is None or args.num_proc_per_machine <= 0:
        raise RuntimeError("--num_proc_per_machine must be a positive number.")
    if args.ip_config is None:
        raise RuntimeError(
            "A user has to specify an IP configuration file with --ip_config."
        )

    udf_command = str(udf_command[0])
    if "python" not in udf_command:
        raise RuntimeError(
            "DGL launching script can only support Python executable file."
        )

    submit_jobs(args, udf_command)
if __name__ == "__main__":
    # Configure root logging once for the launcher process before running.
    logging.basicConfig(
        format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO
    )
    main()