# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Module of the model-benchmark base class."""

import math
import time
import statistics
from abc import abstractmethod

from superbench.common.utils import logger, stdout_logger
from superbench.benchmarks import Precision, ModelAction, DistributedImpl, DistributedBackend, BenchmarkType, ReturnCode
from superbench.benchmarks.base import Benchmark
from superbench.benchmarks.context import Enum


class Optimizer(Enum):
    """The Enum class representing different optimizers."""
    SGD = 'sgd'
    ADAM = 'adam'
    ADAMW = 'adamw'


class ModelBenchmark(Benchmark):
    """The base class of E2E model benchmarks."""
    def __init__(self, name, parameters=''):
        """Constructor.

        Args:
            name (str): benchmark name.
            parameters (str): benchmark parameters.
        """
        super().__init__(name, parameters)

        self._benchmark_type = BenchmarkType.MODEL
        self._world_size = 1
        self._local_rank = None
        self._global_rank = None
        self._dataset = None
        self._dataloader = None
        self._model = None
        self._optimizer_type = None
        self._optimizer = None
        self._loss_fn = None
        self._target = None
        self._supported_precision = []
        self._gpu_available = None

    def add_parser_arguments(self):
        """Add the specified arguments."""
        super().add_parser_arguments()

        self._parser.add_argument(
            '--num_warmup',
            type=int,
            default=64,
            required=False,
            help='The number of warmup steps.',
        )
        self._parser.add_argument(
            '--num_steps',
            type=int,
            default=2048,
            required=False,
            help='The number of test steps.',
        )
        self._parser.add_argument(
            '--sample_count',
            type=int,
            default=1024,
            required=False,
            help='The number of data samples in dataset.',
        )
        self._parser.add_argument(
            '--batch_size',
            type=int,
            default=32,
            required=False,
            help='The batch size.',
        )
        self._parser.add_argument(
            '--precision',
            type=Precision,
            default=[Precision.FLOAT32, Precision.FLOAT16],
            nargs='+',
            required=False,
            help='Model precision. E.g. {}.'.format(' '.join(Precision.get_values())),
        )
        self._parser.add_argument(
            '--model_action',
            type=ModelAction,
            default=[ModelAction.TRAIN],
            nargs='+',
            required=False,
            help='Benchmark model process. E.g. {}.'.format(' '.join(ModelAction.get_values())),
        )
        self._parser.add_argument(
            '--distributed_impl',
            type=DistributedImpl,
            default=None,
            required=False,
            help='Distributed implementations. E.g. {}.'.format(' '.join(DistributedImpl.get_values())),
        )

        self._parser.add_argument(
            '--distributed_backend',
            type=DistributedBackend,
            default=None,
            required=False,
            help='Distributed backends. E.g. {}.'.format(' '.join(DistributedBackend.get_values())),
        )

        self._parser.add_argument(
            '--no_gpu',
            action='store_true',
            default=False,
            help='Disable GPU training.',
        )

        self._parser.add_argument(
            '--pin_memory',
            action='store_true',
            default=False,
            help='Enable option to pin memory in data loader.',
        )

        self._parser.add_argument(
            '--force_fp32',
            action='store_true',
            default=False,
            help='Enable option to use full float32 precision.',
        )

        self._parser.add_argument(
            '--log_n_steps',
            type=int,
            default=0,
            required=False,
            help='Log step time to stdout every n steps; 0 disables real-time logging.',
        )

    @abstractmethod
    def _judge_gpu_availability(self):
        """Judge GPUs' availability according to arguments and running environment."""
        pass
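
    # A minimal sketch of a possible override, assuming a PyTorch-based
    # subclass (the concrete framework class defines the real logic):
    #
    #     def _judge_gpu_availability(self):
    #         import torch
    #         self._gpu_available = not self._args.no_gpu and torch.cuda.is_available()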

    @abstractmethod
    def _set_force_fp32(self):
        """Set the config that controls whether full float32 precision will be used.

        On Ampere or newer GPUs, PyTorch and TensorFlow use TF32 instead of FP32 by default.
        TF32 execution can be disabled by setting force_fp32 to True.
        """
        pass
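
    # A minimal PyTorch sketch (an assumption; TensorFlow would need its own
    # switch, e.g. tf.config.experimental.enable_tensor_float_32_execution):
    #
    #     def _set_force_fp32(self):
    #         import torch
    #         torch.backends.cuda.matmul.allow_tf32 = not self._args.force_fp32
    #         torch.backends.cudnn.allow_tf32 = not self._args.force_fp32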

    @abstractmethod
    def _init_distributed_setting(self):
        """Initialize the distributed library and bind the worker to GPU.

        Return:
            True if distributed library is initialized successfully.
        """
        pass
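
    # A minimal sketch, assuming torch.distributed with the NCCL backend and
    # torchrun-style environment variables (subclasses own the real logic):
    #
    #     def _init_distributed_setting(self):
    #         import os
    #         import torch
    #         if self._args.distributed_impl is None:
    #             return True
    #         self._local_rank = int(os.environ['LOCAL_RANK'])
    #         torch.cuda.set_device(self._local_rank)
    #         torch.distributed.init_process_group(backend='nccl')
    #         self._world_size = torch.distributed.get_world_size()
    #         self._global_rank = torch.distributed.get_rank()
    #         return True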

    @abstractmethod
    def _generate_dataset(self):
        """Generate dataset for benchmarking according to shape info.

        Return:
            True if dataset is created successfully.
        """
        pass
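
    # A minimal sketch with synthetic data (the 3x224x224 image shape and the
    # 1000-class labels are illustrative assumptions, not the real shape info):
    #
    #     def _generate_dataset(self):
    #         import torch
    #         self._dataset = torch.utils.data.TensorDataset(
    #             torch.randn(self._args.sample_count, 3, 224, 224),
    #             torch.randint(0, 1000, (self._args.sample_count, )),
    #         )
    #         return True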

    @abstractmethod
    def _init_dataloader(self):
        """Initialize the dataloader.

        Return:
            True if dataloader is created successfully.
        """
        pass
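
    # A minimal PyTorch sketch wiring in the pin_memory argument (hedged;
    # distributed runs would also need a sampler):
    #
    #     def _init_dataloader(self):
    #         import torch
    #         self._dataloader = torch.utils.data.DataLoader(
    #             dataset=self._dataset,
    #             batch_size=self._args.batch_size,
    #             pin_memory=self._args.pin_memory,
    #         )
    #         return True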

    def _preprocess(self):
        """Preprocess/preparation operations before the benchmarking.

        Return:
            True if _preprocess() succeed.
        """
        if not super()._preprocess():
            return False

        self._judge_gpu_availability()
        self._set_force_fp32()
        logger.info(
            'Model placement - model: {}, GPU availability: {}, pin memory: {}, force fp32: {}.'.format(
                self._name, self._gpu_available, self._args.pin_memory, self._args.force_fp32
            )
        )

        if not self._init_distributed_setting():
            self._result.set_return_code(ReturnCode.DISTRIBUTED_SETTING_INIT_FAILURE)
            return False

        # Set sample_count aligned with batch_size.
        self._args.sample_count = math.ceil(self._args.sample_count / self._args.batch_size) * self._args.batch_size
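        # e.g. sample_count=1000, batch_size=32 -> ceil(1000 / 32) * 32 = 1024.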

        if not self._generate_dataset():
            self._result.set_return_code(ReturnCode.DATASET_GENERATION_FAILURE)
            return False

        if not self._init_dataloader():
            self._result.set_return_code(ReturnCode.DATALOADER_INIT_FAILURE)
            return False

        return True

    @abstractmethod
    def _create_optimizer(self):
        """Create the optimizer instance used for training and wrap it with the distributed library if needed.

        Return:
            True if optimizer instance is created successfully.
        """
        pass
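
    # A minimal PyTorch sketch keyed off the Optimizer enum above (the 1e-3
    # learning rate is an illustrative assumption):
    #
    #     def _create_optimizer(self):
    #         import torch
    #         if self._optimizer_type == Optimizer.ADAMW:
    #             self._optimizer = torch.optim.AdamW(self._model.parameters(), lr=1e-3)
    #         elif self._optimizer_type == Optimizer.ADAM:
    #             self._optimizer = torch.optim.Adam(self._model.parameters(), lr=1e-3)
    #         else:
    #             self._optimizer = torch.optim.SGD(self._model.parameters(), lr=1e-3)
    #         return self._optimizer is not None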

    @abstractmethod
    def _create_model(self, precision):
        """Construct the model for benchmarking.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.
        """
        pass

    def __train(self, precision):
        """Launch the training benchmark.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.

        Return:
            True if step_times list is not empty.
        """
        if not self._create_model(precision):
            self._result.set_return_code(ReturnCode.MODEL_CREATION_FAILURE)
            return False

        if not self._create_optimizer():
            self._result.set_return_code(ReturnCode.OPTIMIZER_CREATION_FAILURE)
            return False

        # The unit of step time should be milliseconds.
        step_times = self._train_step(precision)
        step_times = self.__process_model_result(ModelAction.TRAIN, precision, step_times)
        if not step_times:
            self._result.set_return_code(ReturnCode.INVALID_BENCHMARK_RESULT)
            return False

        logger.info(
            'Average train time - round: {}, model: {}, precision: {}, step time: {:.6f} ms.'.format(
                self._curr_run_index, self._name, precision, statistics.mean(step_times)
            )
        )

        return True

    def __inference(self, precision):
        """Launch the inference benchmark.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.

        Return:
            True if step_times list is not empty.
        """
        if not self._create_model(precision):
            self._result.set_return_code(ReturnCode.MODEL_CREATION_FAILURE)
            return False
        # The unit of step time should be milliseconds.
        step_times = self._inference_step(precision)
        step_times = self.__process_model_result(ModelAction.INFERENCE, precision, step_times)
        if not step_times:
            self._result.set_return_code(ReturnCode.INVALID_BENCHMARK_RESULT)
            return False

        logger.info(
            'Average inference time - round: {}, model: {}, precision: {}, step time: {:.6f} ms.'.format(
                self._curr_run_index, self._name, precision, statistics.mean(step_times)
            )
        )

        return True

    @abstractmethod
    def _train_step(self, precision):
        """Define the training process.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.

        Return:
            The step-time list of every training step.
        """
        pass
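
    # A minimal sketch of the timing contract (hedged; the forward/backward
    # details are framework-specific and omitted here):
    #
    #     def _train_step(self, precision):
    #         durations = []
    #         curr_step = 0
    #         while True:
    #             start = time.time()
    #             ...    # forward pass, loss, backward pass, optimizer step
    #             curr_step += 1
    #             if curr_step > self._args.num_warmup:
    #                 durations.append((time.time() - start) * 1000)
    #                 self._log_step_time(curr_step, precision, durations)
    #             if self._is_finished(curr_step, time.time()):
    #                 return durations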

    @abstractmethod
    def _inference_step(self, precision):
        """Define the inference process.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.

        Return:
            The latency list of every inference operation.
        """
        pass

    def _benchmark(self):
        """Implementation for benchmarking.

        Return:
            True if run benchmark successfully.
        """
        precision_need_to_run = list()
        for precision in self._args.precision:
            # Check if the precision is supported or not.
            if precision not in self._supported_precision:
                logger.warning(
                    'Cannot run with specified precision - model: {}, supported precision: {}, specified precision: {}'.
                    format(self._name, ' '.join([p.value for p in self._supported_precision]), precision)
                )
            else:
                precision_need_to_run.append(precision)

        if len(precision_need_to_run) == 0:
            self._result.set_return_code(ReturnCode.NO_SUPPORTED_PRECISION)
            return False

        for precision in precision_need_to_run:
            for model_action in self._args.model_action:
                self._sub_benchmark_start_time = time.time()
                if model_action == ModelAction.TRAIN:
                    if not self.__train(precision):
                        return False
                elif model_action == ModelAction.INFERENCE:
                    if not self.__inference(precision):
                        return False
                else:
                    logger.warning(
                        'Model action has no implementation yet - model: {}, model_action: {}'.format(
                            self._name, model_action
                        )
                    )

        return True

    def _is_finished(self, curr_step, curr_time):
        """Judge whether the benchmarking should be stopped early or not.

        Args:
            curr_step (int): the current benchmarking step.
            curr_time (float): the current time in seconds, as returned by time.time().

        Return:
            True if the benchmarking should be stopped.
        """
        total_steps = self._args.num_warmup + self._args.num_steps

        if (
            (self._args.duration > 0 and (curr_time - self._sub_benchmark_start_time) >= self._args.duration)
            or (total_steps > 0 and curr_step >= total_steps)
        ):
            return True

        return False
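
    # Typical usage inside a step loop (see the _train_step sketch above):
    #
    #     if self._is_finished(curr_step, time.time()):
    #         return durations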

    def _sync_result(self, result):
        """Function to reduce the result to rank 0.

        Args:
            result (list): The result data to sync.

        Return:
            The result if the reduction succeeds, otherwise None.
        """
        return result
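
    # A distributed subclass might override this to average across ranks; a
    # hedged torch.distributed sketch:
    #
    #     def _sync_result(self, result):
    #         import torch
    #         tensor = torch.tensor(result, device='cuda')
    #         torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM)
    #         return (tensor / self._world_size).tolist()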

    def __process_model_result(self, model_action, precision, step_times):
        """Function to process raw results and save the summarized results.

        Args:
            model_action (ModelAction): train or inference.
            precision (Precision): precision of model and input data, such as float32, float16.
            step_times (list): The step time list of every training/inference step, unit is millisecond.

        Return:
            step_times if step_times list is not empty, otherwise None.
        """
        if len(step_times) == 0:
            logger.error(
                'Step time list is empty - round: {}, model: {}, model_action: {}, precision: {}.'.format(
                    self._curr_run_index, self._name, model_action, precision
                )
            )
            return None

        precision_metric = {'float16': 'fp16', 'float32': 'fp32', 'float64': 'fp64', 'bfloat16': 'bf16'}
        if precision.value in precision_metric:
            precision = precision_metric[precision.value]
        metric_s = '{}_{}_step_time'.format(precision, model_action)
        metric_t = '{}_{}_throughput'.format(precision, model_action)
        # The unit of step time is millisecond, use it to calculate the throughput with the unit samples/sec.
        millisecond_per_second = 1000
        throughput = [millisecond_per_second / step_time * self._args.batch_size for step_time in step_times]
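        # e.g. step_time=20 ms, batch_size=32 -> 1000 / 20 * 32 = 1600 samples/sec.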
        self._result.add_raw_data(metric_s, step_times, self._args.log_raw_data)
        self._result.add_raw_data(metric_t, throughput, self._args.log_raw_data)

        if model_action == ModelAction.TRAIN:
            step_times = self._sync_result(step_times)
            if not step_times:
                return None
            if self._local_rank is None or self._global_rank == 0:
                self._result.add_result(metric_s, statistics.mean(step_times))
                throughput = [millisecond_per_second / step_time * self._args.batch_size for step_time in step_times]
                self._result.add_result(metric_t, statistics.mean(throughput))
        elif model_action == ModelAction.INFERENCE:
            self._result.add_result(metric_s, statistics.mean(step_times))
            self._result.add_result(metric_t, statistics.mean(throughput))
            self._process_percentile_result(metric_s, step_times)
            self._process_percentile_result(metric_t, throughput)

        return step_times

    @abstractmethod
    def _cal_params_count(self):
        """Calculate the parameters scale of the model.

        Return:
            The count of trainable parameters.
        """
        pass
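
    # A minimal PyTorch sketch (hedged; counts only trainable parameters):
    #
    #     def _cal_params_count(self):
    #         return sum(p.numel() for p in self._model.parameters() if p.requires_grad)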

    def print_env_info(self):
        """Print environments or dependencies information."""
        # TODO: will implement it when add real benchmarks in the future.
        pass

    def _log_step_time(self, curr_step, precision, duration):
        """Log step time into stdout regularly.

        Args:
            curr_step (int): the index of the current step.
            precision (Precision): precision of model and input data, such as float32, float16.
            duration (list): the durations of all steps, in milliseconds.
        """
        if self._args.log_n_steps and curr_step % self._args.log_n_steps == 0:
            step_time = statistics.mean(duration) if len(duration) < self._args.log_n_steps \
                else statistics.mean(duration[-self._args.log_n_steps:])
            stdout_logger.log(f'{self._name} - {precision.value}: step {curr_step}, step time {step_time}\n')
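
# A hypothetical end-to-end usage sketch (MyModelBenchmark and its parameter
# string are assumptions for illustration; real subclasses live in the
# framework-specific modules):
#
#     class MyModelBenchmark(ModelBenchmark):
#         ...    # implement the abstract hooks above
#
#     benchmark = MyModelBenchmark('my-model', parameters='--batch_size 32 --num_steps 128')
#     benchmark.run()    # assuming the Benchmark base class drives _preprocess()/_benchmark()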