# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""SuperBench Runner."""

import os
import json
import random
from pathlib import Path
from pprint import pformat
from collections import defaultdict

import jsonlines
from natsort import natsorted
from joblib import Parallel, delayed
from omegaconf import ListConfig, OmegaConf

from superbench.common.utils import SuperBenchLogger, logger, gen_ibstat, gen_traffic_pattern_host_groups
from superbench.common.utils.lazy_import import LazyImport
from superbench.benchmarks import ReduceType, Reducer
from superbench.monitor import MonitorRecord

AnsibleClient = LazyImport('superbench.runner.ansible', 'AnsibleClient')


class SuperBenchRunner():
    """SuperBench runner class."""
    def __init__(self, sb_config, docker_config, ansible_config, sb_output_dir):
        """Initilize.

        Args:
            sb_config (DictConfig): SuperBench config object.
            docker_config (DictConfig): Docker config object.
            ansible_config (DictConfig): Ansible config object.
            sb_output_dir (str): SuperBench output directory.
        """
        self._sb_config = sb_config
        self._docker_config = docker_config
        self._ansible_config = ansible_config
        self._sb_output_dir = sb_output_dir
        self._output_path = Path(sb_output_dir).expanduser().resolve()
        self._ansible_client = AnsibleClient(ansible_config)

        self.__set_logger('sb-run.log')
        logger.info('Runner uses config: %s.', pformat(OmegaConf.to_container(self._sb_config, resolve=True)))
        logger.info('Runner writes to: %s.', str(self._output_path))

        self._sb_benchmarks = self._sb_config.superbench.benchmarks
        self.__validate_sb_config()
        self._sb_enabled_benchmarks = self.__get_enabled_benchmarks()
        logger.info('Runner will run: %s.', self._sb_enabled_benchmarks)

    def __set_logger(self, filename):
        """Set logger and add file handler.

        Args:
            filename (str): Log file name.
        """
        SuperBenchLogger.add_handler(logger.logger, filename=str(self._output_path / filename))

    def __validate_sb_config(self):    # noqa: C901
        """Validate SuperBench config object.

        Raises:
            InvalidConfigError: If input config is invalid.
        """
        # TODO: add validation and defaulting
        if not self._sb_config.superbench.env:
            self._sb_config.superbench.env = {}
        for name in self._sb_benchmarks:
            if not self._sb_benchmarks[name].modes:
                self._sb_benchmarks[name].modes = []
            for idx, mode in enumerate(self._sb_benchmarks[name].modes):
                if not mode.env:
                    self._sb_benchmarks[name].modes[idx].env = {}
                if mode.name == 'local':
                    if not mode.proc_num:
                        self._sb_benchmarks[name].modes[idx].proc_num = 1
                    if not mode.prefix:
                        self._sb_benchmarks[name].modes[idx].prefix = ''
                elif mode.name == 'torch.distributed':
                    if not mode.proc_num:
                        self._sb_benchmarks[name].modes[idx].proc_num = 8
                elif mode.name == 'mpi':
                    if not mode.mca:
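                        # Default OpenMPI MCA parameters: ob1 point-to-point messaging, no
                        # openib BTL, no loopback/docker interfaces for TCP, HCOLL disabled.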
                        self._sb_benchmarks[name].modes[idx].mca = {
                            'pml': 'ob1',
                            'btl': '^openib',
                            'btl_tcp_if_exclude': 'lo,docker0',
                            'coll_hcoll_enable': 0,
                        }
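                    # Forward these variables into the MPI processes; a None value
                    # becomes a bare '-x KEY' that re-exports the current value.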
                    for key in ['PATH', 'LD_LIBRARY_PATH', 'SB_MICRO_PATH', 'SB_WORKSPACE']:
                        self._sb_benchmarks[name].modes[idx].env.setdefault(key, None)
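                    # Topology-aware patterns need the IB state; generate it once if not provided.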
                    if mode.pattern:
                        if mode.pattern.type == 'topo-aware' and not mode.pattern.ibstat:
                            self._sb_benchmarks[name].modes[idx].pattern.ibstat = gen_ibstat(
                                self._ansible_config, str(self._output_path / 'ibstate_file.txt')
                            )

    def __get_enabled_benchmarks(self):
        """Get enabled benchmarks list.

        Returns:
            list: List of benchmarks which will be executed.
        """
        if self._sb_config.superbench.enable:
            if isinstance(self._sb_config.superbench.enable, str):
                return [self._sb_config.superbench.enable]
            elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)):
                return list(self._sb_config.superbench.enable)
        return [k for k, v in self._sb_benchmarks.items() if v.enable]

    def __get_mode_command(self, benchmark_name, mode, timeout=None):
        """Get runner command for given mode.

        Args:
            benchmark_name (str): Benchmark name.
            mode (DictConfig): Runner mode.
            timeout (int): The timeout value in seconds.

        Returns:
            str: Runner command.
        """
        exec_command = ('sb exec --output-dir {output_dir} -c sb.config.yaml -C superbench.enable={name}').format(
            name=benchmark_name,
            output_dir=self._sb_output_dir,
        )
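        # Wrap with coreutils timeout so a hung benchmark gets terminated on the node.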
        if timeout is not None:
            exec_command = 'timeout {timeout} {command}'.format(timeout=timeout, command=exec_command)

        mode_command = exec_command
        if mode.name == 'local':
            mode_command = '{prefix} {command}'.format(
                prefix=mode.prefix.format(proc_rank=mode.proc_rank, proc_num=mode.proc_num),
                command=exec_command,
            )
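            # Expose the local process rank to the benchmark process.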
            mode_command = f'PROC_RANK={mode.proc_rank} {mode_command.strip()}'
        elif mode.name == 'torch.distributed':
            # TODO: replace with torch.distributed.run in v1.9
            # TODO: only supports node_num=1 and node_num=all currently
            torch_dist_params = '' if mode.node_num == 1 else \
                '--nnodes=$NNODES --node_rank=$NODE_RANK --master_addr=$MASTER_ADDR --master_port=$MASTER_PORT '
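            # $NNODES, $NODE_RANK, $MASTER_ADDR and $MASTER_PORT are expanded by the
            # shell on each node at run time.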
            mode_command = (
                f'python3 -m torch.distributed.launch'
                f' --use_env --no_python --nproc_per_node={mode.proc_num} {torch_dist_params}{exec_command}'
                f' superbench.benchmarks.{benchmark_name}.parameters.distributed_impl=ddp'
                f' superbench.benchmarks.{benchmark_name}.parameters.distributed_backend=nccl'
            )
        elif mode.name == 'mpi':
            mode_command = (
                'mpirun '    # use default OpenMPI in image
                '-tag-output '    # tag mpi output with [jobid,rank]<stdout/stderr> prefix
                '-allow-run-as-root '    # allow mpirun to run when executed by root user
                '{host_list} '    # use prepared hostfile or specify nodes and launch {proc_num} processes on each node
                '-bind-to numa '    # bind processes to numa
                '{mca_list} {env_list} {command}'
            ).format(
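                # Three host cases: a single node runs all ranks on localhost; no explicit
                # host list uses the prepared hostfile with ppr mapping; otherwise the
                # specified hosts are passed directly with per-node slot counts.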
                host_list=f'-host localhost:{mode.proc_num}' if mode.node_num == 1 else
                f'-hostfile hostfile -map-by ppr:{mode.proc_num}:node' if mode.host_list is None else '-host ' +
                ','.join(f'{host}:{mode.proc_num}' for host in mode.host_list),
                mca_list=' '.join(f'-mca {k} {v}' for k, v in mode.mca.items()),
                env_list=' '.join(
                    f'-x {k}={str(v).format(proc_rank=mode.proc_rank, proc_num=mode.proc_num)}'
                    if isinstance(v, str) else f'-x {k}' for k, v in mode.env.items()
                ),
                command=exec_command,
            )
        else:
            logger.warning('Unknown mode %s.', mode.name)
        return mode_command.strip()

    def get_failure_count(self):
        """Get failure count during Ansible run.

        Returns:
            int: Failure count.
        """
        return self._ansible_client.failure_count

    def deploy(self):    # pragma: no cover
        """Deploy SuperBench environment."""
        logger.info('Preparing SuperBench environment.')
        extravars = {
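            # Pick a random SSH port in [16384, 32767] for the deployment playbook.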
            'ssh_port': random.randint(1 << 14, (1 << 15) - 1),
            'output_dir': str(self._output_path),
            'docker_image': self._docker_config.image,
            'docker_pull': bool(self._docker_config.pull),
        }
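        # Pass registry credentials only when both username and password are configured.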
        if bool(self._docker_config.username) and bool(self._docker_config.password):
            extravars.update(
                {
                    'docker_registry': self._docker_config.registry,
                    'docker_username': self._docker_config.username,
                    'docker_password': self._docker_config.password,
                }
            )
        self._ansible_client.run(self._ansible_client.get_playbook_config('deploy.yaml', extravars=extravars))

    def check_env(self):    # pragma: no cover
        """Check SuperBench environment."""
        logger.info('Checking SuperBench environment.')
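        # Save the fully resolved config; each node executes against this sb.config.yaml.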
        OmegaConf.save(config=self._sb_config, f=str(self._output_path / 'sb.config.yaml'))
        self._ansible_client.run(
            self._ansible_client.get_playbook_config(
                'check_env.yaml',
                extravars={
                    'no_docker': bool(self._docker_config.skip),
                    'output_dir': str(self._output_path),
                    'env': '\n'.join(f'{k}={v}' for k, v in self._sb_config.superbench.env.items()),
                }
            )
        )

    def cleanup(self):    # pragma: no cover
        """Cleanup remaining processes on all nodes."""
        self._ansible_client.run(self._ansible_client.get_playbook_config('cleanup.yaml'))

    def fetch_results(self):    # pragma: no cover
        """Fetch benchmark results on all nodes."""
        try:
            (self._output_path / 'nodes').mkdir(mode=0o755, parents=True, exist_ok=True)
        except Exception:
            logger.exception('Failed to create directory %s.', str(self._output_path / 'nodes'))
            raise
        self._ansible_client.run(
            self._ansible_client.get_playbook_config(
                'fetch_results.yaml',
                extravars={
                    'sb_output_dir': self._sb_output_dir,
                    'absolute_output_dir': str(self._output_path),
                }
            )
        )

    def __create_results_summary(self):    # pragma: no cover
        """Create the result summary file of all nodes."""
        all_results = list()
        for node_path in (self._output_path / 'nodes').glob('*'):
            if not node_path.is_dir():
                continue
            results_summary = self.__create_single_node_summary(node_path)
            results_summary['node'] = node_path.name
            all_results.append(results_summary)

        with (self._output_path / 'results-summary.jsonl').open(mode='w') as f:
            for result in all_results:
                json.dump(result, f)
                f.write('\n')

    def __create_single_node_summary(self, node_path):    # pragma: no cover # noqa: C901
        """Create the result summary file of single node.

        Args:
            node_path (Path): The Path instance of node directory.

        Returns:
            dict: Result summary of single node.
        """
        results_summary = dict()
        reduce_ops = dict()
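        # Natural sort keeps numbered result paths in order (e.g., 2 before 10).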
        file_list = [Path(f) for f in natsorted([str(f) for f in node_path.glob('**/results.json')])]
        for results_file in file_list:
            with results_file.open() as f:
                try:
                    results = json.load(f)
                except ValueError:
                    logger.error('Invalid JSON file: {}'.format(results_file))
                    continue

                for result in results:
                    try:
                        benchmark_name = result['name']
                    except Exception:
                        logger.error('Invalid content in JSON file: {}'.format(results_file))
                        continue
                    if benchmark_name not in results_summary:
                        results_summary[benchmark_name] = defaultdict(list)
                    for metric in result['result']:
                        metric_name = '{}/{}'.format(benchmark_name, metric)
                        if metric_name not in reduce_ops:
                            reduce_ops[metric_name] = result['reduce_op'][metric]
                        elif reduce_ops[metric_name] != result['reduce_op'][metric]:
                            logger.error('Inconsistent reduce type for metric: {}'.format(metric_name))
                            continue

                        results_summary[benchmark_name][metric].append(result['result'][metric])

        results_summary = self.__merge_benchmark_metrics(results_summary, reduce_ops)
        monitor_summary = self.__merge_monitor_metrics(node_path)
        results_summary = {**results_summary, **monitor_summary}
        with (node_path / 'results-summary.json').open(mode='w') as f:
            json.dump(results_summary, f, indent=2)

        return results_summary

    def __generate_metric_name(self, benchmark_name, metric, rank_count, run_count, curr_rank, curr_run):
        """Generate the summarized metrics name.

        The format of the metric name is:
               {benchmark_name}/[{curr_run}/]{metric}[:{curr_rank}]
        The run index part appears only when run_count > 1, and the rank suffix
        only when rank_count > 1.
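
        For example, with run_count=2 and rank_count=4, metric 'bandwidth' of a
        hypothetical benchmark 'foo' at curr_run=1 and curr_rank=3 yields
        'foo/1/bandwidth:3'.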

        Args:
            benchmark_name (str): The benchmark name.
            metric (str): The metric name.
            rank_count (int): The total number of ranks.
            run_count (int): The total number of runs.
            curr_rank (int): The current rank index.
            curr_run (int): The current run index.

        Returns:
            str: The summarized metric name.
        """
        metric_name = benchmark_name
        if run_count > 1:
            metric_name = '{}/{}'.format(metric_name, curr_run)
        metric_name = '{}/{}'.format(metric_name, metric)
        if rank_count > 1:
            metric_name = '{}:{}'.format(metric_name, curr_rank)

        return metric_name

    def __merge_benchmark_metrics(self, results_summary, reduce_ops):
        """Merge metrics of all benchmarks in one node.

        Args:
            results_summary (dict): Summarized result of one node.
            reduce_ops (dict): The reduce type of each metric.

        Returns:
            dict: Flattened result with metric as key.
        """
        metrics_summary = dict()
        for benchmark_name in results_summary:
            for metric in results_summary[benchmark_name]:
                metric_name = '{}/{}'.format(benchmark_name, metric)
                if metric_name not in reduce_ops or (
                    reduce_ops[metric_name] is not None and reduce_ops[metric_name] not in ReduceType.get_values()
                ):
                    logger.error('Unknown reduce type for metric: {}'.format(metric_name))
                    continue

                if reduce_ops[metric_name] is not None:
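                    # Each rank contributes a list of per-run values; zip(*...) groups
                    # them by run index so the reduce runs across ranks for every run.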
                    reduce_func = Reducer.get_reduce_func(ReduceType(reduce_ops[metric_name]))
                    values = [reduce_func(list(result)) for result in zip(*results_summary[benchmark_name][metric])]
                    for run, value in enumerate(values):
                        metric_name = self.__generate_metric_name(benchmark_name, metric, 1, len(values), 0, run)
                        metrics_summary[metric_name] = value
                else:
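                    # No reduce op: keep every rank's and every run's value as its own metric.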
                    rank_count = len(results_summary[benchmark_name][metric])
                    for rank, rank_value in enumerate(results_summary[benchmark_name][metric]):
                        run_count = len(rank_value)
                        for run, run_value in enumerate(rank_value):
                            metric_name = self.__generate_metric_name(
                                benchmark_name, metric, rank_count, run_count, rank, run
                            )
                            metrics_summary[metric_name] = run_value

        return metrics_summary

    def __merge_monitor_metrics(self, node_path):
        """Merge and summarize monitor metrics of one node.

        Args:
            node_path (Path): The Path instance of the node directory.

        Returns:
            dict: Flattened result with metric as key.
        """
        metrics_summary = dict()
        all_samples = list()
        file_list = list(node_path.glob('**/monitor.jsonl'))
        for results_file in file_list:
            try:
                with jsonlines.open(results_file) as reader:
                    all_samples.extend(reader)
            except Exception as e:
                logger.error('Invalid JSONL file: {}, error message: {}'.format(results_file, str(e)))
                continue
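        # Order samples chronologically before grouping values per metric.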
        all_samples = sorted(all_samples, key=lambda k: k.get('time', '0'))
        metrics_dict = dict()
        for sample in all_samples:
            for metric, value in sample.items():
                if metric not in metrics_dict:
                    metrics_dict[metric] = list()
                metrics_dict[metric].append(value)

        for metric, values in metrics_dict.items():
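            # The part before ':' selects the reduce op from MonitorRecord.reduce_ops.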
            prefix = metric.split(':')[0]
            for pattern, reduce_type in MonitorRecord.reduce_ops.items():
                if pattern == prefix:
                    reduce_func = Reducer.get_reduce_func(reduce_type)
                    metric_name = 'monitor/{}'.format(metric)
                    metrics_summary[metric_name] = reduce_func(values)
                    break

        return metrics_summary

    def _run_proc(self, benchmark_name, mode, vars):
        """Run the process.

        Args:
            benchmark_name (str): Benchmark name.
            mode (DictConfig): Runner mode.
            vars (dict): Process variables.

        Returns:
            int: Process return code.
        """
        mode.update(vars)
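        # For pattern-based MPI runs, expose the scheduling indices so the benchmark
        # can tell its serial step and parallel group apart.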
        if mode.name == 'mpi' and mode.pattern:
            mode.env.update({'SB_MODE_SERIAL_INDEX': mode.serial_index, 'SB_MODE_PARALLEL_INDEX': mode.parallel_index})
        logger.info('Runner is going to run %s in %s mode, proc rank %d.', benchmark_name, mode.name, mode.proc_rank)

        timeout = self._sb_benchmarks[benchmark_name].timeout
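        # Enforce a minimum timeout of 60 seconds when one is configured.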
        if isinstance(timeout, int):
            timeout = max(timeout, 60)

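        # Environment goes through /tmp/sb.env: passed to docker exec as an env
        # file, or sourced inline when Docker is skipped.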
        env_list = '--env-file /tmp/sb.env'
        if self._docker_config.skip:
            env_list = 'set -o allexport && source /tmp/sb.env && set +o allexport'
        for k, v in mode.env.items():
            if isinstance(v, str):
                envvar = f'{k}={str(v).format(proc_rank=mode.proc_rank, proc_num=mode.proc_num)}'
                env_list += f' -e {envvar}' if not self._docker_config.skip else f' && export {envvar}'

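        # Run inside the sb-workspace container by default; fall back to plain
        # bash on the host when Docker is skipped.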
        fcmd = "docker exec {env_list} sb-workspace bash -c '{command}'"
        if self._docker_config.skip:
            fcmd = "bash -c '{env_list} && cd $SB_WORKSPACE && {command}'"
        ansible_runner_config = self._ansible_client.get_shell_config(
            fcmd.format(env_list=env_list, command=self.__get_mode_command(benchmark_name, mode, timeout))
        )
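        # Multi-node MPI launches through a single mpirun that fans out to the
        # other hosts, so the Ansible shell config needs MPI-specific adjustment.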
        if mode.name == 'mpi' and mode.node_num != 1:
            ansible_runner_config = self._ansible_client.update_mpi_config(ansible_runner_config)

        if isinstance(timeout, int):
            # we do not expect timeout in ansible unless subprocess hangs
            ansible_runner_config['timeout'] = timeout + 60

        rc = self._ansible_client.run(ansible_runner_config, sudo=(not self._docker_config.skip))
        return rc

    def run(self):
        """Run the SuperBench benchmarks distributedly."""
        self.check_env()
        for benchmark_name in self._sb_benchmarks:
            if benchmark_name not in self._sb_enabled_benchmarks:
                continue
            benchmark_config = self._sb_benchmarks[benchmark_name]
            for mode in benchmark_config.modes:
                ansible_rc = 0
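                # Local mode: run proc_num processes per node, in parallel when requested.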
                if mode.name == 'local':
                    rc_list = Parallel(n_jobs=mode.proc_num if mode.parallel else 1)(
                        delayed(self._run_proc)(benchmark_name, mode, {
                            'proc_rank': proc_rank
                        }) for proc_rank in range(mode.proc_num)
                    )
                    ansible_rc = sum(rc_list)
                elif mode.name == 'torch.distributed' or mode.name == 'mpi':
                    if not mode.pattern:
                        ansible_rc = self._run_proc(benchmark_name, mode, {'proc_rank': 0})
                    else:
                        if not os.path.exists(self._output_path / 'hostfile'):
                            logger.warning('No hostfile under %s.', self._output_path)
                            continue
                        with open(self._output_path / 'hostfile', 'r') as f:
                            host_list = f.read().splitlines()
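                        # Split hosts into groups per the traffic pattern: groups run
                        # serially, hosts within a group run in parallel.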
                        host_groups = gen_traffic_pattern_host_groups(
                            host_list, mode.pattern, self._output_path / 'mpi_pattern.txt', benchmark_name
                        )
                        for serial_index, host_group in enumerate(host_groups):
                            para_rc_list = Parallel(n_jobs=len(host_group))(
                                delayed(self._run_proc)(
                                    benchmark_name,
                                    mode,
                                    vars={
                                        'proc_rank': 0,
                                        'host_list': host_list,
                                        'serial_index': str(serial_index),
                                        'parallel_index': str(parallel_index),
                                    }
                                ) for parallel_index, host_list in enumerate(host_group)
                            )
                            ansible_rc = ansible_rc + sum(para_rc_list)
                else:
                    logger.warning('Unknown mode %s.', mode.name)
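                # A failed run may leave stray processes on the nodes; clean up before continuing.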
                if ansible_rc != 0:
                    self.cleanup()
            self.fetch_results()

        self.__create_results_summary()