# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Module of the base class."""

import shlex
import signal
import traceback
import argparse
import numbers
from datetime import datetime
from operator import attrgetter
from abc import ABC, abstractmethod

import numpy as np

from superbench.common.utils import logger
from superbench.benchmarks import BenchmarkType, ReturnCode
from superbench.benchmarks.result import BenchmarkResult


class SortedMetavarTypeHelpFormatter(argparse.MetavarTypeHelpFormatter):
    """Custom HelpFormatter class for argparse which sorts option strings."""
    def add_arguments(self, actions):
        """Sort option strings before original add_arguments.

        Args:
            actions (argparse.Action): Argument parser actions.
        """
        super(SortedMetavarTypeHelpFormatter, self).add_arguments(sorted(actions, key=attrgetter('option_strings')))


class Benchmark(ABC):
    """The base class of all benchmarks."""
    def __init__(self, name, parameters=''):
        """Constructor.

        Args:
            name (str): benchmark name.
            parameters (str): benchmark parameters.
        """
        self._name = name
        self._argv = list(filter(None, shlex.split(parameters))) if parameters is not None else list()
        self._benchmark_type = None
        self._parser = argparse.ArgumentParser(
            add_help=False,
            usage=argparse.SUPPRESS,
            allow_abbrev=False,
            formatter_class=SortedMetavarTypeHelpFormatter,
        )
        # Fix optionals title in Python 3.10
        self._parser._optionals.title = 'optional arguments'
        self._args = None
        self._curr_run_index = 0
        self._result = None

    def add_parser_arguments(self):
        """Add the specified arguments."""
        self._parser.add_argument(
            '--run_count',
            type=int,
            default=1,
            required=False,
            help='The run count of benchmark.',
        )
        self._parser.add_argument(
            '--duration',
            type=int,
            default=0,
            required=False,
            help='The elapsed time of benchmark in seconds.',
        )
        self._parser.add_argument(
            '--log_raw_data',
            action='store_true',
            default=False,
            help='Log raw data into file instead of saving it into result object.',
        )
        self._parser.add_argument(
            '--log_flushing',
            action='store_true',
            default=False,
            help='Real-time log flushing.',
        )

    def get_configurable_settings(self):
        """Get all the configurable settings.

        Return:
            All configurable settings in raw string.
        """
        return self._parser.format_help().strip()

    def parse_args(self, ignore_invalid=False):
        """Parse the arguments.

        Return:
            ret (bool): whether parsing succeeded or not.
            args (argparse.Namespace): parsed arguments.
            unknown (list): unknown arguments.
        """
        try:
            args, unknown = self._parser.parse_known_args(self._argv)
        except BaseException as e:
            if ignore_invalid:
                logger.info('Missing or invalid parameters, will ignore the error and skip the args checking.')
                return True, None, []
            else:
                logger.error('Invalid argument - benchmark: {}, message: {}.'.format(self._name, str(e)))
                return False, None, []

        ret = True
        if len(unknown) > 0:
            logger.error(
                'Unknown arguments - benchmark: {}, unknown arguments: {}'.format(self._name, ' '.join(unknown))
            )
            ret = False

        return ret, args, unknown

    def _preprocess(self):
        """Preprocess/preparation operations before the benchmarking.

        Return:
            True if _preprocess() succeeds.
        """
        self.add_parser_arguments()
        ret, self._args, unknown = self.parse_args()

        if not ret:
            self._result = BenchmarkResult(self._name, self._benchmark_type, ReturnCode.INVALID_ARGUMENT)
            return False

        self._result = BenchmarkResult(
            self._name, self._benchmark_type, ReturnCode.SUCCESS, run_count=self._args.run_count
        )

        if not isinstance(self._benchmark_type, BenchmarkType):
            logger.error(
                'Invalid benchmark type - benchmark: {}, type: {}'.format(self._name, type(self._benchmark_type))
            )
            self._result.set_return_code(ReturnCode.INVALID_BENCHMARK_TYPE)
            return False

        return True

    def _postprocess(self):
        """Postprocess/cleanup operations after the benchmarking.

        Return:
            True if _postprocess() succeeds.
        """
        return True

    @abstractmethod
    def _benchmark(self):
        """Implementation for benchmarking."""
        pass

    def run(self):
        """Function to launch the benchmarking.

        Return:
            True if the benchmark runs successfully.
        """
        ret = True
        self._start_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
        try:
            ret &= self._preprocess()
            if ret:
                signal.signal(signal.SIGTERM, self.__signal_handler)
                for self._curr_run_index in range(self._args.run_count):
                    ret &= self._benchmark()
                if ret:
                    ret &= self.__check_result_format()
        except TimeoutError as e:
            self._result.set_return_code(ReturnCode.KILLED_BY_TIMEOUT)
            logger.error('Run benchmark failed - benchmark: %s, message: %s', self._name, e)
        except BaseException as e:
            self._result.set_return_code(ReturnCode.RUNTIME_EXCEPTION_ERROR)
            logger.error('Run benchmark failed - benchmark: {}, message: {}'.format(self._name, str(e)))
        else:
            ret &= self._postprocess()
        finally:
            self._end_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
            self._result.set_timestamp(self._start_time, self._end_time)

        return ret

    def __signal_handler(self, signum, frame):
        """Signal handler for benchmark.

        Args:
            signum (int): Signal number.
            frame (FrameType): Current stack frame when the signal was received.
        """
        logger.debug('Killed by %s', signal.Signals(signum).name)
        logger.debug(''.join(traceback.format_stack(frame, 5)))
        if signum == signal.SIGTERM:
            raise TimeoutError('Killed by SIGTERM or timeout!')

    def __check_result_format(self):
        """Check the validation of result object.

        Return:
            True if the result is valid.
        """
        if (not self.__check_result_type()) or (not self.__check_summarized_result()) or (not self.__check_raw_data()):
            self._result.set_return_code(ReturnCode.INVALID_BENCHMARK_RESULT)
            return False

        return True

    def __check_result_type(self):
        """Check the type of result object.

        Return:
            True if the result is an instance of BenchmarkResult.
        """
        if not isinstance(self._result, BenchmarkResult):
            logger.error(
                'Invalid benchmark result type - benchmark: {}, type: {}'.format(self._name, type(self._result))
            )
            return False

        return True

    def __is_list_type(self, data, t):
        """Check whether data is a list whose items are all instances of type t."""
        if isinstance(data, list) and all(isinstance(item, t) for item in data):
            return True
        return False

    def __is_list_list_type(self, data, t):
        """Check whether data is a list of lists whose inner items are all instances of type t."""
        if (self.__is_list_type(data, list) and all(isinstance(value, t) for item in data for value in item)):
            return True
        return False
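    # For example: __is_list_type([1, 2.5], numbers.Number) is True, while
    # __is_list_list_type([[1, 2], [3.0]], numbers.Number) is True and
    # __is_list_list_type([1, 2], numbers.Number) is False.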

    def __check_summarized_result(self):
        """Check the validation of summary result.

        Return:
            True if the summary result is instance of List[Number].
        """
        for metric in self._result.result:
            if not self.__is_list_type(self._result.result[metric], numbers.Number):
                logger.error(
                    'Invalid summarized result - benchmark: {}, metric: {}, result: {}.'.format(
                        self._name, metric, self._result.result[metric]
                    )
                )
                return False

        return True

    def __check_raw_data(self):
        """Check the validation of raw data.

        Return:
            True if the raw data is:
              instance of List[List[Number]] for BenchmarkType.MODEL.
              instance of List[str] for BenchmarkType.DOCKER.
              instance of List[List[Number]] or List[str] for BenchmarkType.MICRO.
        """
        for metric in self._result.raw_data:
            is_valid = True
            if self._benchmark_type == BenchmarkType.MODEL:
                is_valid = self.__is_list_list_type(self._result.raw_data[metric], numbers.Number)
            elif self._benchmark_type == BenchmarkType.DOCKER:
                is_valid = self.__is_list_type(self._result.raw_data[metric], str)
            elif self._benchmark_type == BenchmarkType.MICRO:
                is_valid = self.__is_list_type(self._result.raw_data[metric], str) or self.__is_list_list_type(
                    self._result.raw_data[metric], numbers.Number
                )
            if not is_valid:
                logger.error(
                    'Invalid raw data type - benchmark: {}, metric: {}, raw data: {}.'.format(
                        self._name, metric, self._result.raw_data[metric]
                    )
                )
                return False

        return True

    def _process_percentile_result(self, metric, result, reduce_type=None):
        """Function to process the percentile results.

        Args:
            metric (str): metric name which is the key.
            result (List[numbers.Number]): numerical result.
            reduce_type (ReduceType): The type of reduce function.
        """
        if len(result) > 0:
            percentile_list = ['50', '90', '95', '99', '99.9']
            for percentile in percentile_list:
                self._result.add_result(
                    '{}_{}'.format(metric, percentile),
                    np.percentile(result, float(percentile), interpolation='nearest'), reduce_type
                )

    def print_env_info(self):
        """Print environments or dependencies information."""
        # TODO: will implement it when adding real benchmarks in the future.
        pass

    @property
    def name(self):
        """Decoration function to access benchmark name."""
        return self._result.name

    @property
    def type(self):
        """Decoration function to access benchmark type."""
        return self._result.type

    @property
    def run_count(self):
        """Decoration function to access benchmark run_count."""
        return self._result.run_count

    @property
    def return_code(self):
        """Decoration function to access benchmark return_code."""
        return self._result.return_code

    @property
    def start_time(self):
        """Decoration function to access benchmark start_time."""
        return self._result.start_time

    @property
    def end_time(self):
        """Decoration function to access benchmark end_time."""
        return self._result.end_time

    @property
    def raw_data(self):
        """Decoration function to access benchmark raw_data."""
        return self._result.raw_data

    @property
    def result(self):
        """Decoration function to access benchmark result."""
        return self._result.result

    @property
    def serialized_result(self):
        """Decoration function to access benchmark result."""
        return self._result.to_string()

    @property
    def default_metric_count(self):
        """Decoration function to get the count of default metrics."""
        return self._result.default_metric_count
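

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original interface): a minimal
# concrete subclass showing the expected life cycle -- set _benchmark_type,
# extend add_parser_arguments(), implement _benchmark(), then call run().
# The class name, '--sleep_seconds' flag, and 'sleep_time' metric are
# hypothetical examples; the add_result(metric, value, reduce_type) call
# simply mirrors its use in _process_percentile_result above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import time

    class _ExampleSleepBenchmark(Benchmark):
        """Toy micro-benchmark that measures the overhead of time.sleep()."""
        def __init__(self, name, parameters=''):
            """Constructor."""
            super().__init__(name, parameters)
            self._benchmark_type = BenchmarkType.MICRO

        def add_parser_arguments(self):
            """Add the example-specific arguments."""
            super().add_parser_arguments()
            self._parser.add_argument(
                '--sleep_seconds',
                type=float,
                default=0.01,
                required=False,
                help='Seconds to sleep in each run.',
            )

        def _benchmark(self):
            """Measure one sleep call and record it as a summarized metric."""
            start = time.perf_counter()
            time.sleep(self._args.sleep_seconds)
            elapsed = time.perf_counter() - start
            self._result.add_result('sleep_time', elapsed, None)
            return True

    example = _ExampleSleepBenchmark('example-sleep', parameters='--run_count 3 --sleep_seconds 0.001')
    if example.run():
        print(example.serialized_result)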