# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""SuperBench Runner test."""

import json
import shutil
import tempfile
import unittest
from pathlib import Path
from unittest import mock

import yaml
from omegaconf import OmegaConf

from superbench.runner import SuperBenchRunner


class RunnerTestCase(unittest.TestCase):
    """A class for runner test cases."""
    def setUp(self):
        """Hook method for setting up the test fixture before exercising it."""
        test_config_file = Path(__file__).parent / '../../tests/data/test.yaml'
        with test_config_file.open() as fp:
            self.test_config = OmegaConf.create(yaml.load(fp, Loader=yaml.SafeLoader))
        self.sb_output_dir = tempfile.mkdtemp()

        self.runner = SuperBenchRunner(
            self.test_config,
            OmegaConf.create({}),
            OmegaConf.create({}),
            self.sb_output_dir,
        )

    def tearDown(self):
        """Hook method for deconstructing the test fixture after testing it."""
        shutil.rmtree(self.sb_output_dir)

    def test_set_logger(self):
        """Test log file exists."""
        expected_log_file = Path(self.runner._sb_output_dir) / 'sb-run.log'
        self.assertTrue(expected_log_file.is_file())

    def test_validate_sb_config(self):
        """Test validate_sb_config."""
        self.runner._SuperBenchRunner__validate_sb_config()
        self.assertIn('env', self.runner._sb_config.superbench)
        for name in self.runner._sb_benchmarks:
            self.assertIn('modes', self.runner._sb_config.superbench.benchmarks[name])
            for mode in self.runner._sb_config.superbench.benchmarks[name].modes:
                self.assertIn('env', mode)
                if mode.name == 'local':
                    self.assertIn('proc_num', mode)
                    self.assertIn('prefix', mode)
                if mode.name == 'torch.distributed':
                    self.assertIn('proc_num', mode)
                if mode.name == 'mpi':
                    self.assertIn('mca', mode)

    def test_get_failure_count(self):
        """Test get_failure_count."""
        self.assertEqual(0, self.runner.get_failure_count())

    def test_get_mode_command(self):
        """Test __get_mode_command."""
        test_cases = [
            {
                'benchmark_name': 'foo',
                'mode': {
                    'name': 'non_exist',
                },
                'expected_command':
                f'sb exec --output-dir {self.sb_output_dir} -c sb.config.yaml -C superbench.enable=foo',
            },
            {
                'benchmark_name':
                'foo',
                'mode': {
                    'name': 'local',
                    'proc_num': 1,
                    'proc_rank': 0,
                    'prefix': '',
                },
                'expected_command':
                f'PROC_RANK=0 sb exec --output-dir {self.sb_output_dir} -c sb.config.yaml -C superbench.enable=foo',
            },
            {
                'benchmark_name':
                'foo',
                'mode': {
                    'name': 'local',
                    'proc_num': 8,
                    'proc_rank': 6,
                    'prefix': 'CUDA_VISIBLE_DEVICES={proc_rank} numactl -N $(({proc_rank}/2))'
                },
                'expected_command': (
                    'PROC_RANK=6 CUDA_VISIBLE_DEVICES=6 numactl -N $((6/2)) '
                    f'sb exec --output-dir {self.sb_output_dir} -c sb.config.yaml -C superbench.enable=foo'
                ),
            },
            {
                'benchmark_name':
                'foo',
                'mode': {
                    'name': 'local',
                    'proc_num': 16,
                    'proc_rank': 1,
                    'prefix': 'RANK={proc_rank} NUM={proc_num}'
                },
                'expected_command': (
                    'PROC_RANK=1 RANK=1 NUM=16 '
                    f'sb exec --output-dir {self.sb_output_dir} -c sb.config.yaml -C superbench.enable=foo'
                ),
            },
            {
                'benchmark_name':
                'foo',
                'mode': {
                    'name': 'torch.distributed',
                    'proc_num': 1,
                    'node_num': 'all',
                },
                'expected_command': (
                    'torchrun '
                    '--no_python --nproc_per_node=1 '
                    '--nnodes=$NNODES --node_rank=$NODE_RANK '
                    '--master_addr=$MASTER_ADDR --master_port=$MASTER_PORT '
                    f'sb exec --output-dir {self.sb_output_dir} -c sb.config.yaml -C superbench.enable=foo '
                    'superbench.benchmarks.foo.parameters.distributed_impl=ddp '
                    'superbench.benchmarks.foo.parameters.distributed_backend=nccl'
                ),
            },
            {
                'benchmark_name':
                'foo',
                'mode': {
                    'name': 'torch.distributed',
                    'proc_num': 8,
                    'node_num': 1,
                },
                'expected_command': (
                    'torchrun '
                    '--no_python --nproc_per_node=8 '
                    f'sb exec --output-dir {self.sb_output_dir} -c sb.config.yaml -C superbench.enable=foo '
                    'superbench.benchmarks.foo.parameters.distributed_impl=ddp '
                    'superbench.benchmarks.foo.parameters.distributed_backend=nccl'
                ),
            },
            {
                'benchmark_name':
                'foo',
                'mode': {
                    'name': 'mpi',
                    'proc_num': 8,
                    'proc_rank': 1,
                    'mca': {},
                    'env': {
                        'PATH': None,
                        'LD_LIBRARY_PATH': None,
                    },
                },
                'expected_command': (
                    'mpirun -tag-output -allow-run-as-root -hostfile hostfile -map-by ppr:8:node -bind-to numa '
                    ' -x PATH -x LD_LIBRARY_PATH '
                    f'sb exec --output-dir {self.sb_output_dir} -c sb.config.yaml -C superbench.enable=foo'
                ),
            },
            {
                'benchmark_name':
                'foo',
                'mode': {
                    'name': 'mpi',
                    'proc_num': 8,
                    'proc_rank': 2,
                    'mca': {
                        'coll_hcoll_enable': 0,
                    },
                    'env': {
                        'SB_MICRO_PATH': '/sb',
                        'FOO': 'BAR',
                        'RANK': '{proc_rank}',
                        'NUM': '{proc_num}',
                    },
                },
                'expected_command': (
                    'mpirun -tag-output -allow-run-as-root -hostfile hostfile -map-by ppr:8:node -bind-to numa '
                    '-mca coll_hcoll_enable 0 -x SB_MICRO_PATH=/sb -x FOO=BAR -x RANK=2 -x NUM=8 '
                    f'sb exec --output-dir {self.sb_output_dir} -c sb.config.yaml -C superbench.enable=foo'
                ),
            },
            {
                'benchmark_name':
                'foo',
                'mode': {
                    'name': 'mpi',
                    'node_num': 1,
                    'proc_num': 8,
                    'proc_rank': 2,
                    'mca': {
                        'coll_hcoll_enable': 0,
                    },
                    'env': {
                        'SB_MICRO_PATH': '/sb',
                        'FOO': 'BAR',
                        'RANK': '{proc_rank}',
                        'NUM': '{proc_num}',
                    },
                },
                'expected_command': (
                    'mpirun -tag-output -allow-run-as-root -host localhost:8 -bind-to numa '
                    '-mca coll_hcoll_enable 0 -x SB_MICRO_PATH=/sb -x FOO=BAR -x RANK=2 -x NUM=8 '
                    f'sb exec --output-dir {self.sb_output_dir} -c sb.config.yaml -C superbench.enable=foo'
                ),
            },
            {
                'benchmark_name':
                'foo',
                'mode': {
                    'name': 'mpi',
                    'node_num': 1,
                    'proc_num': 4,
                    'proc_rank': 1,
                    'mca': {},
                    'env': {
                        'NCCL_BUFFSIZE': 4194304,
                        'NCCL_RINGS': '0 1 2 3|0 3 2 1',
                        'PATH': None,
                    },
                },
                'expected_command': (
                    "mpirun -tag-output -allow-run-as-root -host localhost:4 -bind-to numa "
                    '-x "NCCL_BUFFSIZE=4194304" -x "NCCL_RINGS=0 1 2 3|0 3 2 1" -x PATH '
                    f'sb exec --output-dir {self.sb_output_dir} -c sb.config.yaml -C superbench.enable=foo'
                ),
            },
            {
                'benchmark_name':
                'foo',
                'mode': {
                    'name': 'mpi',
                    'proc_num': 8,
                    'proc_rank': 1,
                    'mca': {},
                    'pattern': {
                        'type': 'all-nodes',
                    },
                    'env': {
                        'PATH': None,
                        'LD_LIBRARY_PATH': None,
                    },
                },
                'expected_command': (
                    'mpirun -tag-output -allow-run-as-root -host node0:8,node1:8 -bind-to numa '
                    ' -x PATH -x LD_LIBRARY_PATH '
                    f'sb exec --output-dir {self.sb_output_dir} -c sb.config.yaml -C superbench.enable=foo'
                ),
            },
        ]

        for test_case in test_cases:
            with self.subTest(msg='Testing with case', test_case=test_case):
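                # Modes with a 'pattern' need a host_list; the test injects one directly instead of deriving it.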
                mode = OmegaConf.create(test_case['mode'])
                if 'pattern' in test_case['mode']:
                    mode.update({'host_list': ['node0', 'node1']})
                self.assertEqual(
                    self.runner._SuperBenchRunner__get_mode_command(
                        test_case['benchmark_name'],
                        mode,
                    ), test_case['expected_command']
                )

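                # Re-run the same case with a timeout; 'timeout <seconds> ' should be injected right before 'sb exec'.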
                test_case['timeout'] = 10
                timeout_str = 'timeout {} '.format(test_case['timeout'])
                index = test_case['expected_command'].find('sb exec')
                expected_command = test_case['expected_command'][:index] + timeout_str + test_case['expected_command'][
                    index:]
                mode = OmegaConf.create(test_case['mode'])
                if 'pattern' in test_case['mode']:
                    mode.update({'host_list': ['node0', 'node1']})
                self.assertEqual(
                    self.runner._SuperBenchRunner__get_mode_command(
                        test_case['benchmark_name'],
                        mode,
                        test_case['timeout'],
                    ), expected_command
                )

    def test_run_empty_benchmarks(self):
        """Test run empty benchmarks, nothing should happen."""
        self.runner._sb_enabled_benchmarks = []
        self.runner.run()

    @mock.patch('superbench.runner.ansible.AnsibleClient.run')
    def test_run_default_benchmarks(self, mock_ansible_client_run):
        """Test run default benchmarks, mock AnsibleClient.run function.

        Args:
            mock_ansible_client_run (function): Mocked AnsibleClient.run function.
        """
        mock_ansible_client_run.return_value = 0
        self.runner.run()

    def test_merge_benchmark_metrics(self):
        """Test __merge_benchmark_metrics."""
        result_summary = json.loads(
            '{"kernel-launch": {"overhead_event": [[0.00583], [0.00545], [0.00581], [0.00572], [0.00559], [0.00591], '
            '[0.00562], [0.00586]], "overhead_wall": [[0.01018], [0.01039], [0.01067], [0.01079], [0.00978], '
            '[0.01085], [0.01036], [0.01033]]}, "resnet_models/pytorch-resnet50": {"steptime_train_float32": '
            '[[252.03]], "throughput_train_float32": [[764.57]], "steptime_train_float16": [[198.36]], '
            '"throughput_train_float16": [[972.64]]}, "resnet_models/pytorch-resnet101": {"steptime_train_float32": '
            '[[385.53]], "throughput_train_float32": [[499.39]], "steptime_train_float16": [[307.49]], '
            '"throughput_train_float16": [[627.21]]}, "pytorch-sharding-matmul": {"allreduce": [[10.56, 10.66], '
            '[10.87, 10.32], [10.56, 10.45], [10.56, 10.60], [10.56, 10.45], [10.56, 10.38], [10.56, 10.33], '
            '[10.56, 10.69]], "allgather": [[10.08, 10.10], [10.08, 10.16], [10.08, 10.06], [10.56, 10.04], '
            '[10.08, 10.05], [10.08, 10.09], [10.08, 10.08], [10.08, 10.06]]}}'
        )
        reduce_ops = json.loads(
            '{"kernel-launch/overhead_event": null, "kernel-launch/overhead_wall": null, '
            '"resnet_models/pytorch-resnet50/steptime_train_float32": null, '
            '"resnet_models/pytorch-resnet50/throughput_train_float32": null, '
            '"resnet_models/pytorch-resnet50/steptime_train_float16": null, '
            '"resnet_models/pytorch-resnet50/throughput_train_float16": null, '
            '"resnet_models/pytorch-resnet101/steptime_train_float32": null, '
            '"resnet_models/pytorch-resnet101/throughput_train_float32": null, '
            '"resnet_models/pytorch-resnet101/steptime_train_float16": null, '
            '"resnet_models/pytorch-resnet101/throughput_train_float16": null, '
            '"pytorch-sharding-matmul/allreduce": "max", "pytorch-sharding-matmul/allgather": "max"}'
        )

        expected = json.loads(
            '{"kernel-launch/overhead_event:0": 0.00583, "kernel-launch/overhead_event:1": 0.00545, '
            '"kernel-launch/overhead_event:2": 0.00581, "kernel-launch/overhead_event:3": 0.00572, '
            '"kernel-launch/overhead_event:4": 0.00559, "kernel-launch/overhead_event:5": 0.00591, '
            '"kernel-launch/overhead_event:6": 0.00562, "kernel-launch/overhead_event:7": 0.00586, '
            '"kernel-launch/overhead_wall:0": 0.01018, "kernel-launch/overhead_wall:1": 0.01039, '
            '"kernel-launch/overhead_wall:2": 0.01067, "kernel-launch/overhead_wall:3": 0.01079, '
            '"kernel-launch/overhead_wall:4": 0.00978, "kernel-launch/overhead_wall:5": 0.01085, '
            '"kernel-launch/overhead_wall:6": 0.01036, "kernel-launch/overhead_wall:7": 0.01033, '
            '"resnet_models/pytorch-resnet50/steptime_train_float32": 252.03, '
            '"resnet_models/pytorch-resnet50/throughput_train_float32": 764.57, '
            '"resnet_models/pytorch-resnet50/steptime_train_float16": 198.36, '
            '"resnet_models/pytorch-resnet50/throughput_train_float16": 972.64, '
            '"resnet_models/pytorch-resnet101/steptime_train_float32": 385.53, '
            '"resnet_models/pytorch-resnet101/throughput_train_float32": 499.39, '
            '"resnet_models/pytorch-resnet101/steptime_train_float16": 307.49, '
            '"resnet_models/pytorch-resnet101/throughput_train_float16": 627.21, '
            '"pytorch-sharding-matmul/0/allreduce": 10.87, "pytorch-sharding-matmul/1/allreduce": 10.69, '
            '"pytorch-sharding-matmul/0/allgather": 10.56, "pytorch-sharding-matmul/1/allgather": 10.16}'
        )
        self.assertEqual(self.runner._SuperBenchRunner__merge_benchmark_metrics(result_summary, reduce_ops), expected)

    def test_merge_monitor_metrics(self):
        """Test __merge_monitor_metrics."""
        path = Path('tests/data/monitor/')
        expected = {
            'monitor/gpu_temperature:0': 50,
            'monitor/gpu_temperature:1': 27,
            'monitor/gpu_temperature:2': 24,
            'monitor/gpu_temperature:3': 26,
            'monitor/gpu_temperature:4': 25,
            'monitor/gpu_temperature:5': 25,
            'monitor/gpu_temperature:6': 23,
            'monitor/gpu_temperature:7': 26,
            'monitor/gpu_power_limit:0': 250,
            'monitor/gpu_power_limit:1': 200,
            'monitor/gpu_power_limit:2': 250,
            'monitor/gpu_power_limit:3': 250,
            'monitor/gpu_power_limit:4': 250,
            'monitor/gpu_power_limit:5': 250,
            'monitor/gpu_power_limit:6': 250,
            'monitor/gpu_power_limit:7': 250,
            'monitor/gpu_corrected_ecc:0': 12,
            'monitor/gpu_corrected_ecc:1': 0,
            'monitor/gpu_corrected_ecc:2': 0,
            'monitor/gpu_corrected_ecc:3': 0,
            'monitor/gpu_corrected_ecc:4': 0,
            'monitor/gpu_corrected_ecc:5': 0,
            'monitor/gpu_corrected_ecc:6': 0,
            'monitor/gpu_corrected_ecc:7': 0,
            'monitor/gpu_uncorrected_ecc:0': 0,
            'monitor/gpu_uncorrected_ecc:1': 0,
            'monitor/gpu_uncorrected_ecc:2': 0,
            'monitor/gpu_uncorrected_ecc:3': 0,
            'monitor/gpu_uncorrected_ecc:4': 0,
            'monitor/gpu_uncorrected_ecc:5': 0,
            'monitor/gpu_uncorrected_ecc:6': 0,
            'monitor/gpu_uncorrected_ecc:7': 0
        }
        self.assertEqual(self.runner._SuperBenchRunner__merge_monitor_metrics(path), expected)

    def test_generate_metric_name(self):
        """Test __generate_metric_name."""
        """(self, benchmark_name, metric, rank_count, run_count, curr_rank, curr_run):"""
        test_cases = [
            {
                'benchmark_name': 'kernel-launch',
                'metric': 'overhead_event',
                'rank_count': 8,
                'run_count': 2,
                'curr_rank': 0,
                'curr_run': 0,
                'expected': 'kernel-launch/0/overhead_event:0',
            },
            {
                'benchmark_name': 'kernel-launch',
                'metric': 'overhead_event',
                'rank_count': 8,
                'run_count': 2,
                'curr_rank': 2,
                'curr_run': 1,
                'expected': 'kernel-launch/1/overhead_event:2',
            },
            {
                'benchmark_name': 'kernel-launch',
                'metric': 'overhead_event',
                'rank_count': 1,
                'run_count': 1,
                'curr_rank': 0,
                'curr_run': 0,
                'expected': 'kernel-launch/overhead_event',
            },
            {
                'benchmark_name': 'resnet_models/pytorch-resnet50',
                'metric': 'fp32_train_step_time',
                'rank_count': 1,
                'run_count': 2,
                'curr_rank': 0,
                'curr_run': 1,
                'expected': 'resnet_models/pytorch-resnet50/1/fp32_train_step_time',
            },
            {
                'benchmark_name': 'resnet_models/pytorch-resnet50',
                'metric': 'fp32_train_step_time',
                'rank_count': 1,
                'run_count': 1,
                'curr_rank': 0,
                'curr_run': 0,
                'expected': 'resnet_models/pytorch-resnet50/fp32_train_step_time',
            },
        ]

        for test_case in test_cases:
            with self.subTest(msg='Testing with case', test_case=test_case):
                self.assertEqual(
                    self.runner._SuperBenchRunner__generate_metric_name(
                        test_case['benchmark_name'], test_case['metric'], test_case['rank_count'],
                        test_case['run_count'], test_case['curr_rank'], test_case['curr_run']
                    ), test_case['expected']
                )

    def test_run_proc_timeout(self):
        """Test run_proc_ timeout."""
        self.runner._sb_benchmarks = {
            'benchmark1': {
                'timeout': 120
            },
            'benchmark2': {
                'timeout': None
            },
            'benchmark3': {
                'timeout': 30
            },
        }

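        # Integer timeouts below 60 seconds are expected to clamp up to 60 (benchmark3); None passes through unchanged.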
        test_cases = [
            ('benchmark1', 120),
            ('benchmark2', None),
            ('benchmark3', 60),
        ]

        for benchmark_name, expected_timeout in test_cases:
            with self.subTest(benchmark_name=benchmark_name):
                timeout = self.runner._sb_benchmarks[benchmark_name].get('timeout', None)
                if isinstance(timeout, int):
                    timeout = max(timeout, 60)
                self.assertEqual(timeout, expected_timeout)

    @mock.patch('superbench.runner.ansible.AnsibleClient.run')
    def test_run_proc_quotes_env_values(self, mock_ansible_client_run):
        """Test _run_proc quotes env values for docker exec and mpirun."""
        mock_ansible_client_run.return_value = 0
        self.runner._sb_benchmarks = {'foo': {}}
        captured = {}

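        # Stub get_shell_config to capture the generated command instead of executing it through Ansible.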
        def fake_get_shell_config(cmd):
            captured['cmd'] = cmd
            return {'module_args': cmd, 'cmdline': '', 'host_pattern': 'localhost', 'module': 'shell'}

        self.runner._ansible_client.get_shell_config = fake_get_shell_config
        mode = OmegaConf.create({
            'name': 'mpi',
            'proc_num': 4,
            'node_num': 1,
            'bind_to': 'numa',
            'mca': {},
            'env': {
                'NCCL_BUFFSIZE': 4194304,
                'NCCL_RINGS': '0 1 2 3|0 3 2 1',
                'PATH': None,
            },
        })

        self.runner._run_proc('foo', mode, {'proc_rank': 0})

        self.assertIn('docker exec sb-workspace bash -lc', captured['cmd'])
        self.assertIn('source /root/sb.env', captured['cmd'])
        self.assertIn('export "NCCL_BUFFSIZE=4194304"', captured['cmd'])
        self.assertIn('export "NCCL_RINGS=0 1 2 3|0 3 2 1"', captured['cmd'])
        self.assertIn('-x "NCCL_BUFFSIZE=4194304"', captured['cmd'])
        self.assertIn('-x "NCCL_RINGS=0 1 2 3|0 3 2 1"', captured['cmd'])
        self.assertIn('-x PATH', captured['cmd'])
        self.assertNotIn('export "PATH=', captured['cmd'])

    @mock.patch('superbench.runner.ansible.AnsibleClient.run')
    def test_run_proc_no_docker_keeps_tmp_env_source(self, mock_ansible_client_run):
        """Test _run_proc still sources /tmp/sb.env in no_docker mode."""
        mock_ansible_client_run.return_value = 0
        self.runner._sb_benchmarks = {'foo': {}}
        self.runner._docker_config.skip = True
        captured = {}

        def fake_get_shell_config(cmd):
            captured['cmd'] = cmd
            return {'module_args': cmd, 'cmdline': '', 'host_pattern': 'localhost', 'module': 'shell'}

        self.runner._ansible_client.get_shell_config = fake_get_shell_config
        mode = OmegaConf.create({
            'name': 'local',
            'proc_num': 1,
            'env': {
                'FOO': 'a b',
            },
            'prefix': '',
        })

        self.runner._run_proc('foo', mode, {'proc_rank': 0})

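        # In no_docker mode the command runs on the host, so env setup sources /tmp/sb.env instead of /root/sb.env.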
        self.assertIn("bash -c 'set -o allexport && source /tmp/sb.env && set +o allexport", captured['cmd'])
        self.assertIn('export "FOO=a b"', captured['cmd'])
        self.assertIn('cd $SB_WORKSPACE && PROC_RANK=0 sb exec', captured['cmd'])