# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""Tests for BenchmarkRegistry module."""

from superbench.benchmarks import Platform, Framework, Precision, BenchmarkRegistry, BenchmarkType, ReturnCode
from superbench.benchmarks.model_benchmarks import ModelBenchmark


class FakeModelBenchmark(ModelBenchmark):
    """Fake benchmark inherit from ModelBenchmark."""
    def __init__(self, name, parameters=''):
        """Constructor.

        Args:
            name: benchmark name.
            parameters: benchmark parameters.
        """
        super().__init__(name, parameters)
        self._supported_precision = [Precision.FLOAT32, Precision.FLOAT16]

    def add_parser_arguments(self):
        """Add the specified arguments."""
        super().add_parser_arguments()
        self._parser.add_argument(
            '--hidden_size',
            type=int,
            default=1024,
            required=False,
            help='Hidden size.',
        )

        self._parser.add_argument(
            '--seq_len',
            type=int,
            default=512,
            required=False,
            help='Sequence length.',
        )

    def _judge_gpu_availability(self):
        """Judge GPUs' availability according to arguments and running environment."""
        self._gpu_available = False

    def _set_force_fp32(self):
        """Set the config that controls whether full float32 precision will be used."""
        pass

    def _init_distributed_setting(self):
        """Initialize the distributed library and bind the worker to GPU."""
        return True

    def _generate_dataset(self):
        """Generate dataset for benchmarking according to shape info."""
        return True

    def _init_dataloader(self):
        """Initialize the distributed dataloader."""
        return True

    def _create_optimizer(self):
        """Create the optimzier instance used for training."""
        return True

    def _create_model(self, precision):
        """Construct the model for benchmarking."""
        return True

    def _train_step(self, precision):
        """Define the training process.

        Args:
            precision (str): precision of model and input data,
              such as float, half.

        Return:
            The step-time list of every training step.
        """
        duration = []
        for i in range(self._args.num_steps):
            duration.append(2.0)
        return duration

    def _inference_step(self, precision):
        """Define the inference process.

        Args:
            precision (str): precision of model and input data,
              such as float, half.

        Return:
            The latency list of every inference operation.
        """
        duration = []
        for i in range(self._args.num_steps):
            duration.append(4.0)
        return duration

    def _cal_params_count(self):
        """Calculate the parameters scale of the model.

        Return:
            The count of trainable parameters.
        """
        return 200


def create_benchmark(params='--num_steps 8'):
    """Register and create benchmark."""
    # Register the FakeModelBenchmark benchmark.
    BenchmarkRegistry.register_benchmark(
        'pytorch-fake-model',
        FakeModelBenchmark,
        parameters='--hidden_size 2',
        platform=Platform.CUDA,
    )
    context = BenchmarkRegistry.create_benchmark_context(
        'fake-model', platform=Platform.CUDA, parameters=params, framework=Framework.PYTORCH
    )
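    # The registry's lookup helpers are private (double leading underscore), so
    # the test reaches them via Python's name-mangled aliases (_BenchmarkRegistry__*).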
    name = BenchmarkRegistry._BenchmarkRegistry__get_benchmark_name(context)
    assert (name)
    (benchmark_class, predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(name, context.platform)
    assert (benchmark_class)
    return benchmark_class(name, predefine_params + ' ' + context.parameters)
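
# NOTE: these tests build the benchmark object directly so that private methods
# can be exercised. A minimal sketch of the usual public flow (assuming
# BenchmarkRegistry.launch_benchmark is the public entry point, which is not
# exercised in this file):
#
#     context = BenchmarkRegistry.create_benchmark_context(
#         'fake-model', platform=Platform.CUDA, parameters='--num_steps 8', framework=Framework.PYTORCH
#     )
#     benchmark = BenchmarkRegistry.launch_benchmark(context)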


def test_arguments_related_interfaces():
    """Test arguments related interfaces.

    Benchmark.add_parser_arguments(),
    Benchmark.parse_args(),
    Benchmark.get_configurable_settings()
    """
    # Positive case for parse_args().
    benchmark = create_benchmark('--num_steps 9')
    benchmark.add_parser_arguments()
    (ret, args, unknown) = benchmark.parse_args()
    assert (ret and args.num_steps == 9)

    # Negative case for parse_args() - invalid precision.
    benchmark = create_benchmark('--num_steps 8 --precision fp32')
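    # 'fp32' is not a valid Precision value; the parser expects full names such
    # as 'float32' (see the --precision help text in expected_settings below).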
    benchmark.add_parser_arguments()
    (ret, args, unknown) = benchmark.parse_args()
    assert (ret is False)

    # Test get_configurable_settings().
    settings = benchmark.get_configurable_settings()
    expected_settings = (
        """optional arguments:
  --run_count int       The run count of benchmark.
  --duration int        The elapsed time of benchmark in seconds.
  --num_warmup int      The number of warmup step.
  --num_steps int       The number of test step.
  --sample_count int    The number of data samples in dataset.
  --batch_size int      The number of batch size.
  --precision Precision [Precision ...]
                        Model precision. E.g. float16 float32 float64 bfloat16
                        uint8 int8 int16 int32 int64.
  --model_action ModelAction [ModelAction ...]
                        Benchmark model process. E.g. train inference.
  --distributed_impl DistributedImpl
                        Distributed implementations. E.g. ddp mirrored
                        multiworkermirrored parameterserver horovod.
  --distributed_backend DistributedBackend
                        Distributed backends. E.g. nccl mpi gloo.
  --no_gpu              Disable GPU training.
  --pin_memory          Enable option to pin memory in data loader.
  --force_fp32          Enable option to use full float32 precision.
  --hidden_size int     Hidden size.
  --seq_len int         Sequence length."""
    )
    assert (settings == expected_settings)


def test_preprocess():
    """Test interface Benchmark._preprocess()."""
    # Positive case for _preprocess().
    benchmark = create_benchmark('--num_steps 8')
    assert (benchmark._preprocess())
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    settings = benchmark.get_configurable_settings()
    expected_settings = (
        """optional arguments:
  --run_count int       The run count of benchmark.
  --duration int        The elapsed time of benchmark in seconds.
  --num_warmup int      The number of warmup step.
  --num_steps int       The number of test step.
  --sample_count int    The number of data samples in dataset.
  --batch_size int      The number of batch size.
  --precision Precision [Precision ...]
                        Model precision. E.g. float16 float32 float64 bfloat16
                        uint8 int8 int16 int32 int64.
  --model_action ModelAction [ModelAction ...]
                        Benchmark model process. E.g. train inference.
  --distributed_impl DistributedImpl
                        Distributed implementations. E.g. ddp mirrored
                        multiworkermirrored parameterserver horovod.
  --distributed_backend DistributedBackend
                        Distributed backends. E.g. nccl mpi gloo.
  --no_gpu              Disable GPU training.
  --pin_memory          Enable option to pin memory in data loader.
  --force_fp32          Enable option to use full float32 precision.
  --hidden_size int     Hidden size.
  --seq_len int         Sequence length."""
    )
    assert (settings == expected_settings)

    # Negative case for _preprocess() - invalid precision.
    benchmark = create_benchmark('--num_steps 8 --precision fp32')
    assert (benchmark._preprocess() is False)
    assert (benchmark.return_code == ReturnCode.INVALID_ARGUMENT)

    # Negative case for _preprocess() - invalid benchmark type.
    benchmark = create_benchmark('--num_steps 8 --precision float32')
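    # Overwrite the benchmark type with a Platform member, which is not a valid
    # BenchmarkType, to trigger the type check.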
    benchmark._benchmark_type = Platform.CUDA
    assert (benchmark._preprocess() is False)
    assert (benchmark.return_code == ReturnCode.INVALID_BENCHMARK_TYPE)


def test_train():
    """Test interface Benchmark.__train()."""
    benchmark = create_benchmark()
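    # _train_step() reports a fixed 2.0 ms per step, so with a batch size of 32
    # (assumed default, since none is passed) the expected throughput is
    # 32 / (2.0 / 1000) = 16000 samples/sec.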
    expected_result = (
        '{"name": "pytorch-fake-model", "type": "model", "run_count": 1, "return_code": 0, '
        '"start_time": null, "end_time": null, "raw_data": {'
        '"fp32_train_step_time": [[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]], '
        '"fp32_train_throughput": [[16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]]}, '
        '"result": {"return_code": [0], "fp32_train_step_time": [2.0], "fp32_train_throughput": [16000.0]}, '
        '"reduce_op": {"fp32_train_step_time": "max", "fp32_train_throughput": "min"}}'
    )
    assert (benchmark._preprocess())
    assert (benchmark._ModelBenchmark__train(Precision.FLOAT32))
    assert (benchmark.serialized_result == expected_result)

    # Step time list is empty (simulate training failure).
    benchmark = create_benchmark('--num_steps 0')
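    # With zero steps the duration list is empty, so the run is marked as failed
    # with return_code 3, i.e. ReturnCode.INVALID_BENCHMARK_RESULT.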
    expected_result = (
        '{"name": "pytorch-fake-model", "type": "model", "run_count": 1, "return_code": 3, '
        '"start_time": null, "end_time": null, "raw_data": {}, "result": {"return_code": [3]}, "reduce_op": {}}'
    )
    assert (benchmark._preprocess())
    assert (benchmark._ModelBenchmark__train(Precision.FLOAT32) is False)
    assert (benchmark.serialized_result == expected_result)


def test_inference():
    """Test interface Benchmark.__inference()."""
    benchmark = create_benchmark()
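    # _inference_step() reports a fixed 4.0 ms per step, so with the assumed
    # default batch size of 32 the expected throughput is 32 / (4.0 / 1000) = 8000 samples/sec.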
    expected_result = (
        '{"name": "pytorch-fake-model", "type": "model", "run_count": 1, "return_code": 0, '
        '"start_time": null, "end_time": null, "raw_data": {'
        '"fp16_inference_step_time": [[4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0]], '
        '"fp16_inference_throughput": [[8000.0, 8000.0, 8000.0, 8000.0, 8000.0, 8000.0, 8000.0, 8000.0]]}, '
        '"result": {"return_code": [0], '
        '"fp16_inference_step_time": [4.0], "fp16_inference_throughput": [8000.0]}, '
        '"reduce_op": {"fp16_inference_step_time": null, "fp16_inference_throughput": null}}'
    )
    assert (benchmark._preprocess())
    assert (benchmark._ModelBenchmark__inference(Precision.FLOAT16))
    assert (benchmark.serialized_result == expected_result)

    # Step time list is empty (simulate inference failure).
    benchmark = create_benchmark('--num_steps 0')
    expected_result = (
        '{"name": "pytorch-fake-model", "type": "model", "run_count": 1, "return_code": 3, '
        '"start_time": null, "end_time": null, "raw_data": {}, "result": {"return_code": [3]}, "reduce_op": {}}'
    )
    assert (benchmark._preprocess())
    assert (benchmark._ModelBenchmark__inference(Precision.FLOAT16) is False)
    assert (benchmark.serialized_result == expected_result)


def test_benchmark():
    """Test interface Benchmark._benchmark()."""
    # Positive case for _benchmark().
    benchmark = create_benchmark()
    benchmark._preprocess()
    assert (benchmark._benchmark())
    assert (benchmark.name == 'pytorch-fake-model')
    assert (benchmark.type == BenchmarkType.MODEL)
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    expected_raw_data = {
        'fp32_train_step_time': [[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]],
        'fp32_train_throughput': [[16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]],
        'fp16_train_step_time': [[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]],
        'fp16_train_throughput': [[16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]]
    }
    assert (benchmark.raw_data == expected_raw_data)
    expected_result = {
        'return_code': [0],
        'fp32_train_step_time': [2.0],
        'fp32_train_throughput': [16000.0],
        'fp16_train_step_time': [2.0],
        'fp16_train_throughput': [16000.0]
    }
    assert (benchmark.result == expected_result)

    expected_serialized_result = (
        '{"name": "pytorch-fake-model", "type": "model", "run_count": 1, "return_code": 0, "start_time": null, '
        '"end_time": null, "raw_data": {"fp32_train_step_time": [[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]], '
        '"fp32_train_throughput": [[16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]], '
        '"fp16_train_step_time": [[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]], '
        '"fp16_train_throughput": [[16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]]}, '
        '"result": {"return_code": [0], "fp32_train_step_time": [2.0], "fp32_train_throughput": [16000.0], '
        '"fp16_train_step_time": [2.0], "fp16_train_throughput": [16000.0]}, '
        '"reduce_op": {"fp32_train_step_time": "max", "fp32_train_throughput": "min", '
        '"fp16_train_step_time": "max", "fp16_train_throughput": "min"}}'
    )
    assert (benchmark.serialized_result == expected_serialized_result)

    # Negative case for _benchmark() - no supported precision found.
    benchmark = create_benchmark('--precision int16')
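    # int16 is a valid Precision value, but FakeModelBenchmark only declares
    # float32 and float16 support, so no runnable precision remains.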
    assert (benchmark._preprocess())
    assert (benchmark._benchmark() is False)
    assert (benchmark.return_code == ReturnCode.NO_SUPPORTED_PRECISION)

    # Negative case for _benchmark() - model train failure, step time list is empty.
    benchmark = create_benchmark('--num_steps 0')
    assert (benchmark._preprocess())
    assert (benchmark._benchmark() is False)
    assert (benchmark.return_code == ReturnCode.INVALID_BENCHMARK_RESULT)

    # Negative case for _benchmark() - model inference failure, step time list is empty.
    benchmark = create_benchmark('--model_action inference --num_steps 0')
    assert (benchmark._preprocess())
    assert (benchmark._benchmark() is False)
    assert (benchmark.return_code == ReturnCode.INVALID_BENCHMARK_RESULT)


def test_check_result_format():
    """Test interface Benchmark.__check_result_format()."""
    # Positive case for __check_result_format().
    benchmark = create_benchmark()
    benchmark._preprocess()
    assert (benchmark._benchmark())
    assert (benchmark._Benchmark__check_result_type())
    assert (benchmark._Benchmark__check_summarized_result())
    assert (benchmark._Benchmark__check_raw_data())

    # Negative case for __check_summarized_result() - change List[float] to List[str].
    benchmark._result._BenchmarkResult__result = {'return_code': [0], 'metric1': ['2.0']}
    assert (benchmark._Benchmark__check_summarized_result() is False)

    # Negative case for __check_raw_data() - change List[List[float]] to List[List[str]].
    benchmark._result._BenchmarkResult__raw_data = {'metric1': [['2.0']]}
    assert (benchmark._Benchmark__check_raw_data() is False)

    # Negative case for __check_result_format() - invalid benchmark result.
    assert (benchmark._Benchmark__check_result_format() is False)
    assert (benchmark.return_code == ReturnCode.INVALID_BENCHMARK_RESULT)