# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""SuperBench Runner."""

import os
import sys
import json
import random
import signal
from pathlib import Path
from pprint import pformat
from collections import defaultdict

import jsonlines
from natsort import natsorted
from joblib import Parallel, delayed
from omegaconf import ListConfig, OmegaConf

from superbench.common.utils import SuperBenchLogger, logger, gen_ibstat, gen_traffic_pattern_host_groups
from superbench.common.utils.lazy_import import LazyImport
from superbench.benchmarks import ReduceType, Reducer
from superbench.monitor import MonitorRecord
from superbench.runner.numactl import get_local_numactl_command

AnsibleClient = LazyImport('superbench.runner.ansible', 'AnsibleClient')
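
# Typical usage (illustrative sketch; in practice the `sb` CLI, e.g. `sb deploy`
# and `sb run`, composes the OmegaConf configs and drives this class):
#   runner = SuperBenchRunner(sb_config, docker_config, ansible_config, './outputs')
#   runner.deploy()
#   runner.run()
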

class SuperBenchRunner():
    """SuperBench runner class."""
    def __init__(self, sb_config, docker_config, ansible_config, sb_output_dir):
        """Initialize.

        Args:
            sb_config (DictConfig): SuperBench config object.
            docker_config (DictConfig): Docker config object.
            ansible_config (DictConfig): Ansible config object.
            sb_output_dir (str): SuperBench output directory.
        """
        self._sb_config = sb_config
        self._docker_config = docker_config
        self._ansible_config = ansible_config
        self._sb_output_dir = sb_output_dir
        self._output_path = Path(sb_output_dir).expanduser().resolve()
        self._ansible_client = AnsibleClient(ansible_config)

        self.__set_logger('sb-run.log')
        logger.info('Runner uses config: %s.', pformat(OmegaConf.to_container(self._sb_config, resolve=True)))
        logger.info('Runner writes to: %s.', str(self._output_path))

        self._sb_benchmarks = self._sb_config.superbench.benchmarks
        self.__validate_sb_config()
        self._sb_enabled_benchmarks = self.__get_enabled_benchmarks()
        logger.info('Runner will run: %s', self._sb_enabled_benchmarks)

    @property
    def _container_name(self):
        """Get docker container name with backward-compatible default."""
        return getattr(self._docker_config, 'container_name', 'sb-workspace')

    def __set_logger(self, filename):
        """Set logger and add file handler.

        Args:
            filename (str): Log file name.
        """
        SuperBenchLogger.add_handler(logger.logger, filename=str(self._output_path / filename))

    def __validate_mpi_bind_to(self, bind_to):
        """Validate mpi bind_to option."""
        valid_mpi_bind_to = {'slot', 'hwthread', 'core', 'l1cache', 'l2cache', 'l3cache', 'package', 'numa', 'none'}
        if bind_to not in valid_mpi_bind_to:
            raise ValueError('Invalid bind_to value {}. Must be one of: {}'.format(bind_to, sorted(valid_mpi_bind_to)))
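        # Illustration: 'numa' and 'core' pass; a value like 'sockets' (hypothetical)
        # would raise ValueError listing the valid options.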

    def __validate_sb_config(self):    # noqa: C901
        """Validate SuperBench config object.

        Raises:
            InvalidConfigError: If input config is invalid.
        """
        # TODO: add validation and defaulting
        if 'env' not in self._sb_config.superbench:
            self._sb_config.superbench.env = {}
        for name in self._sb_benchmarks:
            if 'modes' not in self._sb_benchmarks[name]:
                self._sb_benchmarks[name].modes = []
            for idx, mode in enumerate(self._sb_benchmarks[name].modes):
                if 'env' not in mode:
                    self._sb_benchmarks[name].modes[idx].env = {}
                if mode.name == 'local':
                    if 'proc_num' not in mode:
                        self._sb_benchmarks[name].modes[idx].proc_num = 1
                    if 'prefix' not in mode:
                        self._sb_benchmarks[name].modes[idx].prefix = ''
                elif mode.name == 'torch.distributed':
                    if 'proc_num' not in mode:
                        self._sb_benchmarks[name].modes[idx].proc_num = 8
                elif mode.name == 'mpi':
                    if 'mca' not in mode:
                        self._sb_benchmarks[name].modes[idx].mca = {
                            'pml': 'ob1',
                            'btl': '^openib',
                            'btl_tcp_if_exclude': 'lo,docker0',
                            'coll_hcoll_enable': 0,
                        }
                    if 'bind_to' not in mode:
                        self._sb_benchmarks[name].modes[idx].bind_to = 'numa'
                    else:
                        self.__validate_mpi_bind_to(mode.bind_to)
                    for key in ['PATH', 'LD_LIBRARY_PATH', 'SB_MICRO_PATH', 'SB_WORKSPACE']:
                        self._sb_benchmarks[name].modes[idx].env.setdefault(key, None)
                    if 'pattern' in mode:
                        if mode.pattern.type == 'topo-aware' and 'ibstat' not in mode.pattern:
                            self._sb_benchmarks[name].modes[idx].pattern.ibstat = gen_ibstat(
                                self._ansible_config, str(self._output_path / 'ibstate_file.txt')
                            )
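
    # For reference, a benchmark mode entry that the defaulting above completes
    # might look like this in sb.config.yaml (illustrative sketch, not a schema):
    #   modes:
    #     - name: mpi
    #       proc_num: 8
    #       node_num: 1
    #       bind_to: numa
    #       env:
    #         NCCL_IB_DISABLE: '0'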

    def __get_enabled_benchmarks(self):
        """Get enabled benchmarks list.

        Returns:
            list: List of benchmarks which will be executed.
        """
        if 'enable' in self._sb_config.superbench and self._sb_config.superbench.enable:
            if isinstance(self._sb_config.superbench.enable, str):
                return [self._sb_config.superbench.enable]
            elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)):
                return list(self._sb_config.superbench.enable)
        return [k for k, v in self._sb_benchmarks.items() if 'enable' in v and v.enable]

    def __get_mode_command(self, benchmark_name, mode, timeout=None):
        """Get runner command for given mode.

        Args:
            benchmark_name (str): Benchmark name.
            mode (DictConfig): Runner mode.
            timeout (int): The timeout value in seconds.

        Returns:
            str: Runner command.
        """
        exec_command = ('sb exec --output-dir {output_dir} -c sb.config.yaml -C superbench.enable={name}').format(
            name=benchmark_name,
            output_dir=self._sb_output_dir,
        )
        if timeout is not None:
            exec_command = 'timeout {timeout} {command}'.format(timeout=timeout, command=exec_command)

        # Enable nsys profiling based on environment variable
        enable_nsys = os.environ.get('SB_ENABLE_NSYS', '') == '1'
        trace_dir = os.environ.get('SB_NSYS_TRACE_DIR', self._sb_output_dir)

        mode_command = exec_command
        if mode.name == 'local':
            trace_command = (
                f'nsys profile --output {trace_dir}/{benchmark_name}_{mode.proc_rank}_traces '
                f'--backtrace none --sample none --force-overwrite true --cpuctxsw none --trace cuda,nvtx '
            ) if enable_nsys and mode.proc_rank == 0 else ''
            # Build the command parts, only including trace if it's not empty
            command_parts = []
            setup_command, numactl_command = get_local_numactl_command(mode)
            prefix = mode.prefix.format(proc_rank=mode.proc_rank, proc_num=mode.proc_num)
            if prefix:
                command_parts.append(prefix)
            if numactl_command:
                command_parts.append(numactl_command)
            if trace_command:
                command_parts.append(trace_command)
            command_parts.append(exec_command)
            mode_command = ' '.join(command_parts)
            mode_command = f'PROC_RANK={mode.proc_rank} {mode_command}'
            if setup_command:
                mode_command = f'{setup_command} && {mode_command}'
        elif mode.name == 'torch.distributed':
            # TODO: replace with torch.distributed.run in v1.9
            # TODO: only supports node_num=1 and node_num=all currently
            torch_dist_params = (
                '' if 'node_num' in mode and mode.node_num == 1 else
                '--nnodes=$NNODES --node_rank=$NODE_RANK --master_addr=$MASTER_ADDR --master_port=$MASTER_PORT '
            )

            nsys_prefix = (
                f'nsys profile --output {trace_dir}/{benchmark_name}_traces '
                f'--backtrace none --sample none --force-overwrite true --cpuctxsw none --trace cuda,nvtx '
            ) if enable_nsys else ''

            mode_command = (
                f'{nsys_prefix}'
                f'torchrun'
                f' --no_python --nproc_per_node={mode.proc_num} {torch_dist_params}{exec_command}'
                f' superbench.benchmarks.{benchmark_name}.parameters.distributed_impl=ddp'
                f' superbench.benchmarks.{benchmark_name}.parameters.distributed_backend=nccl'
            )
        elif mode.name == 'mpi':
            trace_command = (
                f'nsys profile --output {trace_dir}/{benchmark_name}_{mode.proc_rank}_traces '
                f'--backtrace none --sample none --force-overwrite true --cpuctxsw none --trace cuda,nvtx '
            ) if enable_nsys else ''
            mode_command = (
                '{trace} '
                'mpirun '    # use default OpenMPI in image
                '-tag-output '    # tag mpi output with [jobid,rank]<stdout/stderr> prefix
                '-allow-run-as-root '    # allow mpirun to run when executed by root user
                '{host_list} '    # use prepared hostfile or specify nodes and launch {proc_num} processes on each node
                '-bind-to {bind_to} '    # bind processes according to mode config
                '{mca_list} {env_list} {command}'
            ).format(
                trace=trace_command,
                host_list=f'-host localhost:{mode.proc_num}' if 'node_num' in mode and mode.node_num == 1 else
                f'-hostfile hostfile -map-by ppr:{mode.proc_num}:node' if 'host_list' not in mode else '-host ' +
                ','.join(f'{host}:{mode.proc_num}' for host in mode.host_list),
                bind_to=mode.bind_to,
                mca_list=' '.join(f'-mca {k} {v}' for k, v in mode.mca.items()),
                env_list=' '.join(self.__format_mpi_env_args(mode.env, mode.proc_rank, mode.proc_num)),
                command=exec_command,
            )
        else:
            logger.warning('Unknown mode %s.', mode.name)
        return mode_command.strip()
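
    # For illustration only (exact output depends on config and environment), a
    # local-mode command composed above might look roughly like:
    #   PROC_RANK=0 numactl --cpunodebind=0 --membind=0 sb exec \
    #     --output-dir outputs -c sb.config.yaml -C superbench.enable=kernel-launch
    # and an mpi-mode command roughly like:
    #   mpirun -tag-output -allow-run-as-root -host localhost:8 -bind-to numa \
    #     -mca pml ob1 -x "PATH=..." sb exec --output-dir outputs \
    #     -c sb.config.yaml -C superbench.enable=nccl-bw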

    def __format_mode_env_value(self, value, proc_rank, proc_num):
        """Format mode env value.

        Args:
            value: Env value from config.
            proc_rank (int): Process rank.
            proc_num (int): Process count.

        Returns:
            str | None: Formatted value, or None to export existing env only.
        """
        if value is None:
            return None
        if isinstance(value, str):
            return value.format(proc_rank=proc_rank, proc_num=proc_num)
        if isinstance(value, (int, float, bool)):
            return str(value)
        raise ValueError(f'Unsupported env value type: {type(value)}')
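
    # e.g. __format_mode_env_value('{proc_rank}', 3, 8) -> '3', 42 -> '42', and
    # None -> None, which signals "export the existing variable only" (illustrative).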

    def __format_mpi_env_args(self, env_dict, proc_rank, proc_num):
        """Format env args for mpirun.

        Args:
            env_dict (DictConfig): Mode env config.
            proc_rank (int): Process rank.
            proc_num (int): Process count.

        Returns:
            list[str]: Formatted mpirun env args.
        """
        env_args = []
        for key, value in env_dict.items():
            formatted_value = self.__format_mode_env_value(value, proc_rank, proc_num)
            env_args.append(
                f'-x {key}' if formatted_value is None else f'-x {self.__quote_env_assignment(key, formatted_value)}'
            )
        return env_args

    def __quote_env_assignment(self, key, value):
        """Quote env assignment for shell command composition.

        Use double quotes so the result can be embedded inside the existing
        bash -lc '...' wrapper without breaking the outer single quotes.

        Args:
            key (str): Env key.
            value (str): Env value.

        Returns:
            str: Double-quoted KEY=value assignment.
        """
        escaped = str(value).replace('\\', '\\\\').replace('"', '\\"').replace('$', '\\$').replace('`', '\\`')
        return f'"{key}={escaped}"'
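
    # e.g. __quote_env_assignment('FOO', 'a"b$c') -> '"FOO=a\"b\$c"'; the double
    # quotes and escapes keep the assignment intact inside the outer bash -lc '...'
    # wrapper used when composing docker exec commands below (illustrative).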

    def get_failure_count(self):
        """Get failure count during Ansible run.

        Returns:
            int: Failure count.
        """
        return self._ansible_client.failure_count

    def deploy(self):    # pragma: no cover
        """Deploy SuperBench environment."""
        logger.info('Preparing SuperBench environment.')
        extravars = {
            'ssh_port': random.randint(1 << 14, (1 << 15) - 1),
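            # i.e. a random port in [16384, 32767], presumably to avoid clashing
            # with well-known ports across concurrent deployments (assumed rationale)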
            'output_dir': str(self._output_path),
            'container': self._container_name,
            'docker_image': self._docker_config.image,
            'docker_pull': bool(self._docker_config.pull),
        }
        if bool(self._docker_config.username) and bool(self._docker_config.password):
            extravars.update(
                {
                    'docker_registry': self._docker_config.registry,
                    'docker_username': self._docker_config.username,
                    'docker_password': self._docker_config.password,
                }
            )
        self._ansible_client.run(self._ansible_client.get_playbook_config('deploy.yaml', extravars=extravars))

    def run_sys_info(self):
        """Run the system info on all nodes."""
        self.check_env()

        logger.info('Runner is going to get node system info.')

        fcmd = "docker exec {container} bash -lc '{{command}}'".format(container=self._container_name)

        if 'skip' not in self._docker_config:
            self._docker_config.skip = False
        if self._docker_config.skip:
            fcmd = "bash -c 'cd $SB_WORKSPACE && {command}'"
        ansible_runner_config = self._ansible_client.get_shell_config(
            fcmd.format(command='sb node info --output-dir {output_dir}'.format(output_dir=self._sb_output_dir))
        )
        ansible_rc = self._ansible_client.run(ansible_runner_config, sudo=(not self._docker_config.skip))

        if ansible_rc != 0:
            self.cleanup()
        self.fetch_results()

    def check_env(self):    # pragma: no cover
        """Check SuperBench environment."""
        logger.info('Checking SuperBench environment.')
        OmegaConf.save(config=self._sb_config, f=str(self._output_path / 'sb.config.yaml'))
        self._ansible_client.run(
            self._ansible_client.get_playbook_config(
                'check_env.yaml',
                extravars={
                    'container': self._container_name,
                    'no_docker': False if 'skip' not in self._docker_config else self._docker_config.skip,
                    'output_dir': str(self._output_path),
                    'env': '\n'.join(f'{k}={v}' for k, v in self._sb_config.superbench.env.items()),
                }
            )
        )

    def cleanup(self):    # pragma: no cover
        """Cleanup remaining processes on all nodes."""
        self._ansible_client.run(self._ansible_client.get_playbook_config('cleanup.yaml'))

    def fetch_results(self):    # pragma: no cover
        """Fetch benchmark results on all nodes."""
        try:
            (self._output_path / 'nodes').mkdir(mode=0o755, parents=True, exist_ok=True)
        except Exception:
            logger.exception('Failed to create directory %s.', str(self._output_path / 'nodes'))
            raise
        self._ansible_client.run(
            self._ansible_client.get_playbook_config(
                'fetch_results.yaml',
                extravars={
                    'sb_output_dir': self._sb_output_dir,
                    'absolute_output_dir': str(self._output_path),
                }
            )
        )

    def __signal_handler(self, signum, frame):
        """Signal handler for runner.

        Args:
            signum (int): Signal number.
            frame (FrameType): Timeout frame.
        """
        if signum == signal.SIGINT or signum == signal.SIGTERM:
            logger.info('Killed by %s, exiting ...', signal.Signals(signum).name)
            self.cleanup()
            sys.exit(128 + signum)

    def __create_results_summary(self):    # pragma: no cover
        """Create the result summary file of all nodes."""
        all_results = list()
        for node_path in (self._output_path / 'nodes').glob('*'):
            if not node_path.is_dir():
                continue
            results_summary = self.__create_single_node_summary(node_path)
            results_summary['node'] = node_path.name
            all_results.append(results_summary)

        with (self._output_path / 'results-summary.jsonl').open(mode='w') as f:
            for result in all_results:
                json.dump(result, f)
                f.write('\n')

    def __create_single_node_summary(self, node_path):    # pragma: no cover # noqa: C901
        """Create the result summary file of single node.

        Args:
            node_path (Path): The Path instance of node directory.

        Returns:
            dict: Result summary of single node.
        """
        results_summary = dict()
        reduce_ops = dict()
        file_list = [Path(f) for f in natsorted([str(f) for f in node_path.glob('**/results.json')])]
        for results_file in file_list:
            with results_file.open() as f:
                try:
                    results = json.load(f)
                except ValueError:
                    logger.error('Invalid JSON file: {}'.format(results_file))
                    continue

                for result in results:
                    try:
                        benchmark_name = result['name']
                    except Exception:
                        logger.error('Invalid content in JSON file: {}'.format(results_file))
                        continue
                    if benchmark_name not in results_summary:
                        results_summary[benchmark_name] = defaultdict(list)
                    for metric in result['result']:
                        metric_name = '{}/{}'.format(benchmark_name, metric)
                        if metric_name not in reduce_ops:
                            reduce_ops[metric_name] = result['reduce_op'][metric]
                        elif reduce_ops[metric_name] != result['reduce_op'][metric]:
                            logger.error('Inconsistent reduce type for metric: {}'.format(metric_name))
                            continue

                        results_summary[benchmark_name][metric].append(result['result'][metric])

        results_summary = self.__merge_benchmark_metrics(results_summary, reduce_ops)
        monitor_summary = self.__merge_monitor_metrics(node_path)
        results_summary = {**results_summary, **monitor_summary}

        with (node_path / 'results-summary.json').open(mode='w') as f:
            json.dump(results_summary, f, indent=2)

        return results_summary

    def __generate_metric_name(self, benchmark_name, metric, rank_count, run_count, curr_rank, curr_run):
        """Generate the summarized metrics name.

        The format of metric name is:
               {benchmark_name}/[{run_count}/]{metric_name}[:rank]
        [run_count] and [rank] parts are optional.

        Args:
            benchmark_name (str): The benchmark name.
            metric (str): The metric name.
            rank_count (int): The total number of ranks.
            run_count (int): The total number of runs.
            curr_rank (int): The current rank index.
            curr_run (int): The current run index.

        Returns:
            str: The summarized metric name.
        """
        metric_name = benchmark_name
        if run_count > 1:
            metric_name = '{}/{}'.format(metric_name, curr_run)
        metric_name = '{}/{}'.format(metric_name, metric)
        if rank_count > 1:
            metric_name = '{}:{}'.format(metric_name, curr_rank)

        return metric_name
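
    # e.g. __generate_metric_name('gemm-flops', 'fp32', rank_count=8, run_count=2,
    # curr_rank=3, curr_run=1) -> 'gemm-flops/1/fp32:3' (illustrative).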

    def __merge_benchmark_metrics(self, results_summary, reduce_ops):
        """Merge metrics of all benchmarks in one node.

        Args:
            results_summary (dict): Summarized result of one node.
            reduce_ops (dict): The reduce type of each metric.

        Returns:
            dict: Flattened result with metric as key.
        """
        metrics_summary = dict()
        for benchmark_name in results_summary:
            for metric in results_summary[benchmark_name]:
                metric_name = '{}/{}'.format(benchmark_name, metric)
                if metric_name not in reduce_ops or (
                    reduce_ops[metric_name] is not None and reduce_ops[metric_name] not in ReduceType.get_values()
                ):
                    logger.error('Unknown reduce type for metric: {}'.format(metric_name))
                    continue

                if reduce_ops[metric_name] is not None:
                    reduce_func = Reducer.get_reduce_func(ReduceType(reduce_ops[metric_name]))
                    values = [reduce_func(list(result)) for result in zip(*results_summary[benchmark_name][metric])]
                    for run in range(len(values)):
                        metric_name = self.__generate_metric_name(benchmark_name, metric, 1, len(values), 0, run)
                        metrics_summary[metric_name] = values[run]
                else:
                    rank_count = len(results_summary[benchmark_name][metric])
                    for rank, rank_value in enumerate(results_summary[benchmark_name][metric]):
                        run_count = len(rank_value)
                        for run, run_value in enumerate(rank_value):
                            metric_name = self.__generate_metric_name(
                                benchmark_name, metric, rank_count, run_count, rank, run
                            )
                            metrics_summary[metric_name] = run_value

        return metrics_summary
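
    # Illustration: with reduce type 'max' and per-rank results [[1.0], [2.0]] for
    # 'bench/metric', the merge yields {'bench/metric': 2.0}; with reduce type None,
    # both ranks are kept as 'bench/metric:0' and 'bench/metric:1' (sketch only).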

    def __merge_monitor_metrics(self, node_path):
        """Merge and summarize monitor metrics of one node.

        Args:
            node_path (Path): The Path instance of node directory.

        Returns:
            dict: Flattened result with metric as key.
        """
        metrics_summary = dict()
        all_samples = list()
        file_list = list(node_path.glob('**/monitor.jsonl'))
        for results_file in file_list:
            try:
                with jsonlines.open(results_file) as reader:
                    all_samples.extend(reader)
            except Exception as e:
                logger.error('Invalid JSONL file: {}, error message: {}'.format(results_file, str(e)))
                continue
        all_samples = sorted(all_samples, key=lambda k: k.get('time', '0'))
        metrics_dict = dict()
        for sample in all_samples:
            for metric, value in sample.items():
                if metric not in metrics_dict:
                    metrics_dict[metric] = list()
                metrics_dict[metric].append(value)

        for metric, values in metrics_dict.items():
            prefix = metric.split(':')[0]
            for pattern, reduce_type in MonitorRecord.reduce_ops.items():
                if pattern == prefix:
                    reduce_func = Reducer.get_reduce_func(reduce_type)
                    metric_name = 'monitor/{}'.format(metric)
                    metrics_summary[metric_name] = reduce_func(values)
                    break

        return metrics_summary
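
    # e.g. a sample like {'gpu_temperature:0': 70} is reduced per
    # MonitorRecord.reduce_ops and surfaces as 'monitor/gpu_temperature:0'
    # (illustrative; actual metric names depend on the monitor).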

    def _run_proc(self, benchmark_name, mode, vars):    # noqa: C901
        """Run the process.

        Args:
            benchmark_name (str): Benchmark name.
            mode (DictConfig): Runner mode.
            vars (dict): Process variables.

        Returns:
            int: Process return code.
        """
        mode.update(vars)
        if mode.name == 'mpi' and 'pattern' in mode:
            mode.env.update({'SB_MODE_SERIAL_INDEX': mode.serial_index, 'SB_MODE_PARALLEL_INDEX': mode.parallel_index})
        logger.info('Runner is going to run %s in %s mode, proc rank %d.', benchmark_name, mode.name, mode.proc_rank)

        timeout = self._sb_benchmarks[benchmark_name].get('timeout', None)
        if isinstance(timeout, int):
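            # clamp to a 60-second floor (assumed rationale: leave headroom for
            # benchmark setup and teardown so short timeouts do not kill runs early)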
            timeout = max(timeout, 60)

        if 'skip' not in self._docker_config:
            self._docker_config.skip = False
        base_env_cmd = 'set -o allexport && source /root/sb.env && set +o allexport'
        mode_env_cmds = []
        if self._docker_config.skip:
            base_env_cmd = 'set -o allexport && source /tmp/sb.env && set +o allexport'
        for k, v in mode.env.items():
            formatted_value = self.__format_mode_env_value(v, mode.proc_rank, mode.proc_num)
            if formatted_value is not None:
                envvar = self.__quote_env_assignment(k, formatted_value)
                mode_env_cmds.append(f'export {envvar}')

        env_list = base_env_cmd
        if mode_env_cmds:
            env_list = f"{env_list} && {' && '.join(mode_env_cmds)}"

        fcmd = "docker exec {container} bash -lc '{{env_list}} && {{command}}'".format(container=self._container_name)
        if self._docker_config.skip:
            fcmd = "bash -c '{env_list} && cd $SB_WORKSPACE && {command}'"
        ansible_runner_config = self._ansible_client.get_shell_config(
            fcmd.format(env_list=env_list, command=self.__get_mode_command(benchmark_name, mode, timeout))
        )
        if mode.name == 'mpi' and 'node_num' in mode and mode.node_num != 1:
            ansible_runner_config = self._ansible_client.update_mpi_config(ansible_runner_config)

        if isinstance(timeout, int):
            # we do not expect timeout in ansible unless subprocess hangs
            ansible_runner_config['timeout'] = timeout + 60

        # overwrite ansible runner's default signal handler with main process's
        rc = self._ansible_client.run(
            ansible_runner_config, cancel_callback=lambda: None, sudo=(not self._docker_config.skip)
        )
        return rc

    def run(self):
        """Run the SuperBench benchmarks across all nodes."""
        self.check_env()
        signal.signal(signal.SIGINT, self.__signal_handler)
        signal.signal(signal.SIGTERM, self.__signal_handler)

        for benchmark_name in self._sb_benchmarks:
            if benchmark_name not in self._sb_enabled_benchmarks:
                continue
            benchmark_config = self._sb_benchmarks[benchmark_name]
            for mode in benchmark_config.modes:
                ansible_rc = 0
                if mode.name == 'local':
                    rc_list = Parallel(n_jobs=mode.proc_num if mode.parallel else 1)(
                        delayed(self._run_proc)(benchmark_name, mode, {
                            'proc_rank': proc_rank
                        }) for proc_rank in range(mode.proc_num)
                    )
                    ansible_rc = sum(rc_list)
                elif mode.name == 'torch.distributed' or mode.name == 'mpi':
                    if 'pattern' not in mode:
                        ansible_rc = self._run_proc(benchmark_name, mode, {'proc_rank': 0})
                    else:
                        if not os.path.exists(self._output_path / 'hostfile'):
                            logger.warning('No hostfile under %s.', self._output_path)
                            continue
                        with open(self._output_path / 'hostfile', 'r') as f:
                            host_list = f.read().splitlines()
                        host_groups = gen_traffic_pattern_host_groups(
                            host_list, mode.pattern, self._output_path / 'mpi_pattern.txt', benchmark_name
                        )
                        for serial_index, host_group in enumerate(host_groups):
                            para_rc_list = Parallel(n_jobs=len(host_group))(
                                delayed(self._run_proc)(
                                    benchmark_name,
                                    mode,
                                    vars={
                                        'proc_rank': 0,
                                        'host_list': host_list,
                                        'serial_index': str(serial_index),
                                        'parallel_index': str(parallel_index),
                                    }
                                ) for parallel_index, host_list in enumerate(host_group)
                            )
                            ansible_rc = ansible_rc + sum(para_rc_list)
                else:
                    logger.warning('Unknown mode %s.', mode.name)
                if ansible_rc != 0:
                    self.cleanup()
            self.fetch_results()

        self.__create_results_summary()