# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Module of the base class."""

import shlex
import signal
import traceback
import argparse
import numbers
from datetime import datetime
from operator import attrgetter
from abc import ABC, abstractmethod

import numpy as np

from superbench.common.utils import logger
from superbench.benchmarks import BenchmarkType, ReturnCode
from superbench.benchmarks.result import BenchmarkResult


class SortedMetavarTypeHelpFormatter(argparse.MetavarTypeHelpFormatter):
    """Custom HelpFormatter class for argparse which sorts option strings."""
    def add_arguments(self, actions):
        """Sort option strings before original add_arguments.

        Args:
            actions (argparse.Action): Argument parser actions.
        """
        super(SortedMetavarTypeHelpFormatter, self).add_arguments(sorted(actions, key=attrgetter('option_strings')))


class Benchmark(ABC):
    """The base class of all benchmarks."""
    def __init__(self, name, parameters=''):
        """Constructor.

        Args:
            name (str): benchmark name.
            parameters (str): benchmark parameters.
        """
        self._name = name
        self._argv = list(filter(None, shlex.split(parameters))) if parameters is not None else list()
        self._benchmark_type = None
        self._parser = argparse.ArgumentParser(
            add_help=False,
            usage=argparse.SUPPRESS,
            allow_abbrev=False,
            formatter_class=SortedMetavarTypeHelpFormatter,
        )
        # Fix the optionals group title for Python 3.10+, where argparse renamed the
        # default group from 'optional arguments' to 'options'.
        self._parser._optionals.title = 'optional arguments'
        self._args = None
        self._curr_run_index = 0
        self._result = None

    def add_parser_arguments(self):
        """Add the specified arguments."""
        self._parser.add_argument(
            '--run_count',
            type=int,
            default=1,
            required=False,
            help='The run count of benchmark.',
        )
        self._parser.add_argument(
            '--duration',
            type=int,
            default=0,
            required=False,
            help='The elapsed time of benchmark in seconds.',
        )
        self._parser.add_argument(
            '--log_raw_data',
            action='store_true',
            default=False,
            help='Log raw data into file instead of saving it into result object.',
        )
        self._parser.add_argument(
            '--log_flushing',
            action='store_true',
            default=False,
            help='Real-time log flushing.',
        )
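
    # A minimal sketch (illustrative only, not part of this module) of how a concrete
    # benchmark subclass is expected to extend the parser: call the base implementation
    # first, then register its own options. The '--batch_size' option is hypothetical.
    #
    #     def add_parser_arguments(self):
    #         super().add_parser_arguments()
    #         self._parser.add_argument(
    #             '--batch_size',
    #             type=int,
    #             default=32,
    #             required=False,
    #             help='Batch size of this hypothetical benchmark.',
    #         )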

    def get_configurable_settings(self):
        """Get all the configurable settings.

        Return:
            All configurable settings as a raw string.
        """
        message = self._parser.format_help().strip()
        return message

    def parse_args(self, ignore_invalid=False):
        """Parse the arguments.

        Return:
            ret (bool): whether parse succeed or not.
            args (argparse.Namespace): parsed arguments.
            unknown (list): unknown arguments.
        """
        try:
            args, unknown = self._parser.parse_known_args(self._argv)
        except BaseException as e:
            if ignore_invalid:
                logger.info('Missing or invalid parameters, will ignore the error and skip the argument check.')
                return True, None, []
            else:
                logger.error('Invalid argument - benchmark: {}, message: {}.'.format(self._name, str(e)))
                return False, None, []

        ret = self._check_unknown_args(unknown)

        return ret, args, unknown

    def _check_unknown_args(self, unknown):
        """Check for unknown arguments and log an error if any are found.

        Args:
            unknown (list): List of unknown arguments.

        Returns:
            bool: False if unknown arguments are found, True otherwise.
        """
        if len(unknown) > 0:
            logger.error(
                'Unknown arguments - benchmark: {}, unknown arguments: {}'.format(self._name, ' '.join(unknown))
            )
            return False
        return True

    def _preprocess(self):
        """Preprocess/preparation operations before the benchmarking.

        Return:
            True if _preprocess() succeeds.
        """
        self.add_parser_arguments()
        ret, self._args, unknown = self.parse_args()

        if not ret:
            self._result = BenchmarkResult(self._name, self._benchmark_type, ReturnCode.INVALID_ARGUMENT)
            return False

        self._result = BenchmarkResult(
            self._name, self._benchmark_type, ReturnCode.SUCCESS, run_count=self._args.run_count
        )

        if not isinstance(self._benchmark_type, BenchmarkType):
            logger.error(
                'Invalid benchmark type - benchmark: {}, type: {}'.format(self._name, type(self._benchmark_type))
            )
            self._result.set_return_code(ReturnCode.INVALID_BENCHMARK_TYPE)
            return False

        return True
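
    # Concrete benchmarks typically override _preprocess() and invoke the base
    # implementation first, e.g. (hypothetical subclass code):
    #
    #     def _preprocess(self):
    #         if not super()._preprocess():
    #             return False
    #         # benchmark-specific preparation (device checks, binary lookup, ...)
    #         return True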

    def _postprocess(self):
        """Postprocess/cleanup operations after the benchmarking.

        Return:
            True if _postprocess() succeeds.
        """
        return True

    @abstractmethod
    def _benchmark(self):
        """Implementation for benchmarking."""
        pass

    def run(self):
        """Function to launch the benchmarking.

        Return:
            True if the benchmark runs successfully.
        """
        ret = True
        self._start_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
        try:
            ret &= self._preprocess()
            if ret:
                signal.signal(signal.SIGTERM, self.__signal_handler)
                for self._curr_run_index in range(self._args.run_count):
                    ret &= self._benchmark()
                if ret:
                    ret &= self.__check_result_format()
        except TimeoutError as e:
            self._result.set_return_code(ReturnCode.KILLED_BY_TIMEOUT)
            logger.error('Run benchmark failed - benchmark: %s, message: %s', self._name, e)
        except BaseException as e:
            self._result.set_return_code(ReturnCode.RUNTIME_EXCEPTION_ERROR)
            logger.error('Run benchmark failed - benchmark: {}, message: {}'.format(self._name, str(e)))
        else:
            ret &= self._postprocess()
        finally:
            self._end_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
            self._result.set_timestamp(self._start_time, self._end_time)

        return ret

    def __signal_handler(self, signum, frame):
        """Signal handler for benchmark.

        Args:
            signum (int): Signal number.
            frame (FrameType): Current stack frame when the signal was received.
        """
        logger.debug('Killed by %s', signal.Signals(signum).name)
        logger.debug(''.join(traceback.format_stack(frame, 5)))
        if signum == signal.SIGTERM:
            raise TimeoutError('Killed by SIGTERM or timeout!')
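
    # Note (assumption about the surrounding framework): the benchmark process is expected
    # to receive SIGTERM when it exceeds its allotted time; the handler above turns that
    # signal into a TimeoutError so run() can record ReturnCode.KILLED_BY_TIMEOUT.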

    def __check_result_format(self):
        """Check the validation of result object.

        Return:
            True if the result is valid.
        """
        if (not self.__check_result_type()) or (not self.__check_summarized_result()) or (not self.__check_raw_data()):
            self._result.set_return_code(ReturnCode.INVALID_BENCHMARK_RESULT)
            return False

        return True

    def __check_result_type(self):
        """Check the type of result object.

        Return:
            True if the result is an instance of BenchmarkResult.
        """
        if not isinstance(self._result, BenchmarkResult):
            logger.error(
                'Invalid benchmark result type - benchmark: {}, type: {}'.format(self._name, type(self._result))
            )
            return False

        return True

    def __is_list_type(self, data, t):
        """Return True if data is a list whose items are all instances of type t."""
        if isinstance(data, list) and all(isinstance(item, t) for item in data):
            return True
        return False

    def __is_list_list_type(self, data, t):
        """Return True if data is a list of lists whose inner items are all instances of type t."""
        if (self.__is_list_type(data, list) and all(isinstance(value, t) for item in data for value in item)):
            return True
        return False

    def __check_summarized_result(self):
        """Check the validation of summary result.

        Return:
            True if the summary result is instance of List[Number].
        """
        for metric in self._result.result:
            if not self.__is_list_type(self._result.result[metric], numbers.Number):
                logger.error(
                    'Invalid summarized result - benchmark: {}, metric: {}, result: {}.'.format(
                        self._name, metric, self._result.result[metric]
                    )
                )
                return False

        return True

    def __check_raw_data(self):
        """Check the validation of raw data.

        Return:
            True if the raw data is:
              instance of List[List[Number]] for BenchmarkType.MODEL.
              instance of List[str] for BenchmarkType.DOCKER.
              instance of List[List[Number]] or List[str] for BenchmarkType.MICRO.
        """
        for metric in self._result.raw_data:
            is_valid = True
            if self._benchmark_type == BenchmarkType.MODEL:
                is_valid = self.__is_list_list_type(self._result.raw_data[metric], numbers.Number)
            elif self._benchmark_type == BenchmarkType.DOCKER:
                is_valid = self.__is_list_type(self._result.raw_data[metric], str)
            elif self._benchmark_type == BenchmarkType.MICRO:
                is_valid = self.__is_list_type(self._result.raw_data[metric], str) or self.__is_list_list_type(
                    self._result.raw_data[metric], numbers.Number
                )
            if not is_valid:
                logger.error(
                    'Invalid raw data type - benchmark: {}, metric: {}, raw data: {}.'.format(
                        self._name, metric, self._result.raw_data[metric]
                    )
                )
                return False

        return True

    def _process_percentile_result(self, metric, result, reduce_type=None):
        """Function to process the percentile results.

        Args:
            metric (str): metric name which is the key.
            result (List[numbers.Number]): numerical result.
            reduce_type (ReduceType): The type of reduce function.
        """
        if len(result) > 0:
            percentile_list = ['50', '90', '95', '99', '99.9']
            for percentile in percentile_list:
                try:
                    # Prefer the newer NumPy 'method' argument; fall back to 'interpolation'
                    # for older NumPy versions that don't support 'method'.
                    val = np.percentile(result, float(percentile), method='nearest')
                except TypeError:
                    # If the 'method' argument is not supported (older NumPy), retry with 'interpolation'.
                    val = np.percentile(result, float(percentile), interpolation='nearest')
                self._result.add_result('{}_{}'.format(metric, percentile), val, reduce_type)
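
    # A minimal usage sketch (hypothetical subclass code) of the percentile helper above,
    # assuming per-iteration latencies have been collected by a concrete benchmark:
    #
    #     latencies = [1.2, 0.9, 1.5, 1.1]   # illustrative measurements
    #     self._process_percentile_result('latency', latencies)
    #
    # This adds summarized metrics named 'latency_50', 'latency_90', 'latency_95',
    # 'latency_99' and 'latency_99.9' to the result object.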

    def print_env_info(self):
        """Print environments or dependencies information."""
        # TODO: will implement it when add real benchmarks in the future.
        pass

    @property
    def name(self):
        """Decoration function to access benchmark name."""
        return self._result.name

    @property
    def type(self):
        """Decoration function to access benchmark type."""
        return self._result.type

    @property
    def run_count(self):
        """Decoration function to access benchmark run_count."""
        return self._result.run_count

    @property
    def return_code(self):
        """Decoration function to access benchmark return_code."""
        return self._result.return_code

    @property
    def start_time(self):
        """Decoration function to access benchmark start_time."""
        return self._result.start_time

    @property
    def end_time(self):
        """Decoration function to access benchmark end_time."""
        return self._result.end_time

    @property
    def raw_data(self):
        """Decoration function to access benchmark raw_data."""
        return self._result.raw_data

    @property
    def result(self):
        """Decoration function to access benchmark result."""
        return self._result.result

    @property
    def serialized_result(self):
        """Decoration function to access benchmark result."""
        return self._result.to_string()

    @property
    def default_metric_count(self):
        """Decoration function to get the count of default metrics."""
        return self._result.default_metric_count
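

# A minimal end-to-end sketch (illustrative only) of how this base class is typically
# specialized and driven; DummyBenchmark and its metric values are hypothetical:
#
#     from superbench.benchmarks import BenchmarkType
#
#     class DummyBenchmark(Benchmark):
#         def __init__(self, name, parameters=''):
#             super().__init__(name, parameters)
#             self._benchmark_type = BenchmarkType.MICRO
#
#         def _benchmark(self):
#             self._result.add_result('metric1', 1.0)
#             return True
#
#     benchmark = DummyBenchmark('dummy', parameters='--run_count 2')
#     benchmark.run()
#     print(benchmark.serialized_result)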