# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""SuperBench Executor test."""

import json
import unittest
import shutil
import tempfile
from pathlib import Path
from unittest import mock

import yaml
from omegaconf import OmegaConf

from superbench.executor import SuperBenchExecutor


class ExecutorTestCase(unittest.TestCase):
    """A class for executor test cases.

    Args:
        unittest.TestCase (unittest.TestCase): TestCase class.
    """
    def setUp(self):
        """Hook method for setting up the test fixture before exercising it."""
        default_config_file = Path(__file__).parent / '../../superbench/config/default.yaml'
        with default_config_file.open() as fp:
            self.default_config = OmegaConf.create(yaml.load(fp, Loader=yaml.SafeLoader))
        self.output_dir = tempfile.mkdtemp()

        self.executor = SuperBenchExecutor(self.default_config, self.output_dir)

    def tearDown(self):
        """Hook method for deconstructing the test fixture after testing it."""
        shutil.rmtree(self.output_dir)

    def test_set_logger(self):
        """Test log file exists."""
        expected_log_file = Path(self.executor._output_dir) / 'sb-exec.log'
        self.assertTrue(expected_log_file.is_file())

    def test_get_enabled_benchmarks_enable_none(self):
        """Test enabled benchmarks when superbench.enable is none."""
        expected_enabled_benchmarks = list(self.default_config.superbench.benchmarks.keys())
        self.assertListEqual(self.executor._sb_enabled, expected_enabled_benchmarks)

    def test_get_enabled_benchmarks_enable_str(self):
        """Test enabled benchmarks when superbench.enable is string."""
        self.executor._sb_config.superbench.enable = 'benchmark_alpha'
        expected_enabled_benchmarks = ['benchmark_alpha']
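        # The private method is reachable from outside the class only via its
        # name-mangled form, _SuperBenchExecutor__get_enabled_benchmarks.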
        self.assertListEqual(self.executor._SuperBenchExecutor__get_enabled_benchmarks(), expected_enabled_benchmarks)

    def test_get_enabled_benchmarks_enable_list(self):
        """Test enabled benchmarks when superbench.enable is list."""
        self.executor._sb_config.superbench.enable = ['benchmark_alpha', 'benchmark_beta']
        expected_enabled_benchmarks = ['benchmark_alpha', 'benchmark_beta']
        self.assertListEqual(self.executor._SuperBenchExecutor__get_enabled_benchmarks(), expected_enabled_benchmarks)

    def test_get_platform(self):
        """Test get platform."""
        self.assertEqual(self.executor._SuperBenchExecutor__get_platform().value, 'CUDA')

    def test_get_arguments(self):
        """Test benchmarks arguments."""
        test_cases = [
            {
                'parameters': None,
                'expected_args': '',
            },
            {
                'parameters': {
                    'duration': 0,
                    'num_warmup': 16,
                    'num_steps': 128,
                    'batch_size': 16,
                    'precision': ['float32', 'float16'],
                    'model_action': ['train', 'inference'],
                },
                'expected_args': (
                    '--duration 0 --num_warmup 16 --num_steps 128 --batch_size 16 '
                    '--precision float32 float16 --model_action train inference'
                ),
            },
        ]
        for test_case in test_cases:
            with self.subTest(msg='Testing with case', test_case=test_case):
                self.assertEqual(
                    self.executor._SuperBenchExecutor__get_arguments(test_case['parameters']),
                    test_case['expected_args']
                )

    def test_create_benchmark_dir(self):
        """Test __create_benchmark_dir."""
        foo_path = Path(self.output_dir, 'benchmarks', 'foo')
        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
        self.assertTrue(foo_path.is_dir())
        self.assertFalse(any(foo_path.iterdir()))

        (foo_path / 'bar.txt').touch()
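        # Creating the same benchmark dir again should rotate the non-empty
        # foo/ aside to foo.1 and leave a fresh, empty foo/ behind.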
        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
        self.assertTrue(foo_path.is_dir())
        self.assertFalse(any(foo_path.iterdir()))
        self.assertFalse((foo_path / 'bar.txt').is_file())
        self.assertTrue(foo_path.with_name('foo.1').is_dir())
        self.assertTrue((foo_path.with_name('foo.1') / 'bar.txt').is_file())

        (foo_path / 'bar.json').touch()
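        # A third creation rotates once more, this time to foo.2.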
        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
        self.assertTrue(foo_path.is_dir())
        self.assertFalse(any(foo_path.iterdir()))
        self.assertFalse((foo_path / 'bar.json').is_file())
        self.assertTrue(foo_path.with_name('foo.2').is_dir())
        self.assertTrue((foo_path.with_name('foo.2') / 'bar.json').is_file())

    def test_write_benchmark_results(self):
        """Test __write_benchmark_results."""
        foobar_path = Path(self.output_dir, 'benchmarks', 'foobar')
        foobar_results_path = foobar_path / 'results.json'
        self.executor._SuperBenchExecutor__create_benchmark_dir('foobar')
        foobar_results = {
            'sum': 1,
            'avg': 1.1,
        }
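        # The results dict should round-trip through the results.json file
        # written into the benchmark directory.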
        self.executor._SuperBenchExecutor__write_benchmark_results('foobar', foobar_results)
        self.assertTrue(foobar_results_path.is_file())
        with foobar_results_path.open(mode='r') as f:
            self.assertDictEqual(json.load(f), foobar_results)

    def test_exec_empty_benchmarks(self):
        """Test execute empty benchmarks, nothing should happen."""
        self.executor._sb_enabled = []
        self.executor.exec()

    @mock.patch('superbench.executor.SuperBenchExecutor._SuperBenchExecutor__exec_benchmark')
    def test_exec_default_benchmarks(self, mock_exec_benchmark):
        """Test execute default benchmarks, mock exec function.

        Args:
            mock_exec_benchmark (function): Mocked __exec_benchmark function.
        """
        mock_exec_benchmark.return_value = {}
        self.executor.exec()

        self.assertTrue(Path(self.output_dir, 'benchmarks').is_dir())
        for benchmark_name in self.executor._sb_benchmarks:
            self.assertTrue(Path(self.output_dir, 'benchmarks', benchmark_name).is_dir())
            self.assertTrue(Path(self.output_dir, 'benchmarks', benchmark_name, 'results.json').is_file())