# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""Tests for disk-performance benchmark."""

import unittest
from unittest import mock

from tests.helper import decorator
from tests.helper.testcase import BenchmarkTestCase
from superbench.benchmarks import BenchmarkRegistry, BenchmarkType, ReturnCode, Platform


class DiskBenchmarkTest(BenchmarkTestCase, unittest.TestCase):
    """Test class for disk-performance benchmark."""
    @classmethod
    def setUpClass(cls):
        """Hook method for setting up class fixture before running tests in the class."""
        super().setUpClass()
        cls.createMockEnvs(cls)
        # The benchmark shells out to fio; provide a mock binary so _preprocess finds it.
        cls.createMockFiles(cls, ['bin/fio'])

    def test_disk_performance_empty_param(self):
        """Test disk-performance benchmark command generation with empty parameter."""
        benchmark_name = 'disk-benchmark'
        (benchmark_class,
         predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
        assert (benchmark_class)

        benchmark = benchmark_class(benchmark_name, parameters='')

        # Check basic information
        assert (benchmark)
        ret = benchmark._preprocess()
        assert (ret is True)
        assert (benchmark.return_code == ReturnCode.SUCCESS)
        assert (benchmark.name == 'disk-benchmark')
        assert (benchmark.type == BenchmarkType.MICRO)

        # No block devices given, so no fio commands should be generated.
        assert (0 == len(benchmark._commands))

    @mock.patch('pathlib.Path.is_block_device')
    def test_disk_performance_invalid_block_device(self, mock_is_block_device):
        """Test disk-performance benchmark command generation with invalid block device."""
        mock_is_block_device.return_value = False

        benchmark_name = 'disk-benchmark'
        (benchmark_class,
         predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
        assert (benchmark_class)

        block_devices = ['mock_block_device_0']
        block_device_option = '--block_devices ' + ' '.join(block_devices)

        benchmark = benchmark_class(benchmark_name, parameters=block_device_option)

        # Preprocess must reject a path that is not a real block device.
        assert (benchmark)
        ret = benchmark._preprocess()
        assert (ret is False)
        assert (benchmark.return_code == ReturnCode.INVALID_ARGUMENT)
        assert (benchmark.name == 'disk-benchmark')
        assert (benchmark.type == BenchmarkType.MICRO)

    @mock.patch('pathlib.Path.is_block_device')
    def test_disk_performance_benchmark_disabled(self, mock_is_block_device):
        """Test disk-performance benchmark command generation with all benchmarks disabled."""
        mock_is_block_device.return_value = True

        benchmark_name = 'disk-benchmark'
        (benchmark_class,
         predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
        assert (benchmark_class)

        block_devices = ['/dev/nvme0n1', '/dev/nvme1n1']
        block_device_option = '--block_devices ' + ' '.join(block_devices)

        # Zero runtimes disable every sub-benchmark, so nothing should be scheduled.
        param_str = block_device_option
        param_str += ' --rand_precond_time=0'
        param_str += ' --seq_read_runtime=0'
        param_str += ' --rand_read_runtime=0'
        benchmark = benchmark_class(benchmark_name, parameters=param_str)

        # Check basic information
        assert (benchmark)
        ret = benchmark._preprocess()
        assert (ret is True)
        assert (benchmark.return_code == ReturnCode.SUCCESS)
        assert (benchmark.name == 'disk-benchmark')
        assert (benchmark.type == BenchmarkType.MICRO)

        # Command list should be empty
        assert (0 == len(benchmark._commands))

    @mock.patch('pathlib.Path.is_block_device')
    def test_disk_performance_benchmark_enabled(self, mock_is_block_device):
        """Test disk-performance benchmark command generation with all benchmarks enabled."""
        mock_is_block_device.return_value = True

        benchmark_name = 'disk-benchmark'
        (benchmark_class,
         predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
        assert (benchmark_class)

        block_devices = ['mock_block_device_0', 'mock_block_device_1']
        block_device_option = '--block_devices ' + ' '.join(block_devices)

        # Give every option a unique "magic" value so each generated fio command
        # can later be matched back to the option that produced it.
        init_test_magic = 45
        curr_test_magic = init_test_magic
        param_str = block_device_option
        # Sequential precondition
        param_str += ' --enable_seq_precond'
        # Random precondition
        param_str += ' --rand_precond_time=%d' % curr_test_magic
        curr_test_magic += 1
        # Seq/rand read/write
        for io_pattern in ['seq', 'rand']:
            for io_type in ['read', 'write', 'readwrite']:
                io_str = '%s_%s' % (io_pattern, io_type)
                param_str += ' --%s_ramp_time=%d' % (io_str, curr_test_magic)
                curr_test_magic += 1
                param_str += ' --%s_runtime=%d' % (io_str, curr_test_magic)
                curr_test_magic += 1
                param_str += ' --%s_iodepth=%d' % (io_str, curr_test_magic)
                curr_test_magic += 1
                param_str += ' --%s_numjobs=%d' % (io_str, curr_test_magic)
                curr_test_magic += 1
        benchmark = benchmark_class(benchmark_name, parameters=param_str)

        # Check basic information
        assert (benchmark)
        ret = benchmark._preprocess()
        assert (ret is True)
        assert (benchmark.return_code == ReturnCode.SUCCESS)
        assert (benchmark.name == 'disk-benchmark')
        assert (benchmark.type == BenchmarkType.MICRO)

        # Check command list
        # 2 devices * (2 preconditions + 2 io_patterns * 3 io_types) = 16 commands
        assert (16 == len(benchmark._commands))

        # Check parameter assignments
        command_idx = 0
        default_rwmixread = 80
        for block_device in block_devices:
            # Magic values restart for each device because the same options apply to all devices.
            curr_test_magic = init_test_magic

            # Sequential precondition
            assert ('--filename=%s' % block_device in benchmark._commands[command_idx])
            command_idx += 1
            # Random precondition
            assert ('--filename=%s' % block_device in benchmark._commands[command_idx])
            assert ('--runtime=%d' % curr_test_magic in benchmark._commands[command_idx])
            curr_test_magic += 1
            command_idx += 1
            # Seq/rand read/write.  Note the option name 'readwrite' maps to fio's 'rw' mode.
            for io_pattern in ['seq', 'rand']:
                for io_type in ['read', 'write', 'rw']:
                    assert ('--filename=%s' % block_device in benchmark._commands[command_idx])
                    # fio prefixes random patterns ('randread'); sequential patterns are bare ('read').
                    fio_rw = '%s%s' % (io_pattern if io_pattern == 'rand' else '', io_type)
                    assert ('--rw=%s' % fio_rw in benchmark._commands[command_idx])
                    assert ('--ramp_time=%d' % curr_test_magic in benchmark._commands[command_idx])
                    curr_test_magic += 1
                    assert ('--runtime=%d' % curr_test_magic in benchmark._commands[command_idx])
                    curr_test_magic += 1
                    assert ('--iodepth=%d' % curr_test_magic in benchmark._commands[command_idx])
                    curr_test_magic += 1
                    assert ('--numjobs=%d' % curr_test_magic in benchmark._commands[command_idx])
                    curr_test_magic += 1
                    if io_type == 'rw':
                        # Mixed read/write jobs carry the default read percentage.
                        assert ('--rwmixread=%d' % default_rwmixread in benchmark._commands[command_idx])
                    command_idx += 1

    @decorator.load_data('tests/data/disk_performance.log')
    def test_disk_performance_result_parsing(self, test_raw_output):
        """Test disk-performance benchmark result parsing."""
        benchmark_name = 'disk-benchmark'
        (benchmark_class,
         predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
        assert (benchmark_class)
        benchmark = benchmark_class(benchmark_name, parameters='')
        assert (benchmark)
        ret = benchmark._preprocess()
        assert (ret is True)
        assert (benchmark.return_code == ReturnCode.SUCCESS)
        assert (benchmark.name == 'disk-benchmark')
        assert (benchmark.type == BenchmarkType.MICRO)

        # Positive case - valid raw output.
        jobname_prefix = 'nvme0n1_rand_read_write'
        assert (benchmark._process_raw_result(0, test_raw_output))
        assert (benchmark.return_code == ReturnCode.SUCCESS)

        # bs + <read, write> x <iops, 95th, 99th, 99.9th>
        assert (9 + benchmark.default_metric_count == len(benchmark.result.keys()))

        assert (1 == len(benchmark.result[jobname_prefix + '_bs']))
        assert (4096 == benchmark.result[jobname_prefix + '_bs'][0])

        assert (1 == len(benchmark.result[jobname_prefix + '_read_iops']))
        assert (85138.890741 == benchmark.result[jobname_prefix + '_read_iops'][0])
        assert (1 == len(benchmark.result[jobname_prefix + '_write_iops']))
        assert (85066.128925 == benchmark.result[jobname_prefix + '_write_iops'][0])

        assert (1 == len(benchmark.result[jobname_prefix + '_read_lat_ns_95.0']))
        assert (1941504 == benchmark.result[jobname_prefix + '_read_lat_ns_95.0'][0])
        assert (1 == len(benchmark.result[jobname_prefix + '_read_lat_ns_99.0']))
        assert (2244608 == benchmark.result[jobname_prefix + '_read_lat_ns_99.0'][0])
        assert (1 == len(benchmark.result[jobname_prefix + '_read_lat_ns_99.9']))
        assert (3620864 == benchmark.result[jobname_prefix + '_read_lat_ns_99.9'][0])

        assert (1 == len(benchmark.result[jobname_prefix + '_write_lat_ns_95.0']))
        assert (1908736 == benchmark.result[jobname_prefix + '_write_lat_ns_95.0'][0])
        assert (1 == len(benchmark.result[jobname_prefix + '_write_lat_ns_99.0']))
        assert (2072576 == benchmark.result[jobname_prefix + '_write_lat_ns_99.0'][0])
        assert (1 == len(benchmark.result[jobname_prefix + '_write_lat_ns_99.9']))
        assert (2605056 == benchmark.result[jobname_prefix + '_write_lat_ns_99.9'][0])

        # Negative case - invalid raw output.
        assert (benchmark._process_raw_result(1, 'Invalid raw output') is False)
        assert (benchmark.return_code == ReturnCode.MICROBENCHMARK_RESULT_PARSING_FAILURE)