# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Keras benchmarks and accuracy tests."""
# pylint: disable=line-too-long
from __future__ import print_function

import json
import os
import time

from typing import Any, MutableMapping, Optional

from absl import flags
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.benchmark import keras_benchmark
from official.utils.testing import benchmark_wrappers
from official.vision.image_classification import classifier_trainer
from official.vision.image_classification.resnet import resnet_imagenet_main

MIN_TOP_1_ACCURACY = 0.76
MAX_TOP_1_ACCURACY = 0.77

MOBILENET_V1_MIN_TOP_1_ACCURACY = 0.65
MOBILENET_V1_MAX_TOP_1_ACCURACY = 0.68

# Range of top-1 accuracies for model optimization techniques.
# Each item indicates (MIN_TOP_1_ACCURACY, MAX_TOP_1_ACCURACY).
MODEL_OPTIMIZATION_TOP_1_ACCURACY = {
    'RESNET50_FINETUNE_PRUNING': (0.76, 0.77),
    'MOBILENET_V1_FINETUNE_PRUNING': (0.67, 0.68),
}
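
# A minimal usage sketch (illustrative only): the bounds for a technique
# unpack directly into the accuracy limits passed to a report, e.g.
#   top_1_min, top_1_max = MODEL_OPTIMIZATION_TOP_1_ACCURACY[
#       'RESNET50_FINETUNE_PRUNING']  # -> (0.76, 0.77)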

FLAGS = flags.FLAGS


def _get_classifier_parameters(
    num_gpus: int = 0,
    builder: str = 'records',
    skip_eval: bool = False,
    distribution_strategy: str = 'mirrored',
    per_replica_batch_size: int = 128,
    epochs: int = 90,
    steps: int = 0,
    epochs_between_evals: int = 1,
    dtype: str = 'float32',
    enable_xla: bool = False,
    run_eagerly: bool = False,
    gpu_thread_mode: Optional[str] = None,
    dataset_num_private_threads: Optional[int] = None,
    loss_scale: Optional[str] = None) -> MutableMapping[str, Any]:
  """Gets classifier trainer's ResNet parameters."""
  return {
      'runtime': {
          'num_gpus': num_gpus,
          'distribution_strategy': distribution_strategy,
          'run_eagerly': run_eagerly,
          'enable_xla': enable_xla,
          'dataset_num_private_threads': dataset_num_private_threads,
          'gpu_thread_mode': gpu_thread_mode,
          'loss_scale': loss_scale,
      },
      'train_dataset': {
          'builder': builder,
          'use_per_replica_batch_size': True,
          'batch_size': per_replica_batch_size,
          'image_size': 224,
          'dtype': dtype,
      },
      'validation_dataset': {
          'builder': builder,
          'batch_size': per_replica_batch_size,
          'use_per_replica_batch_size': True,
          'image_size': 224,
          'dtype': dtype,
      },
      'train': {
          'epochs': epochs,
          'steps': steps,
          'callbacks': {
              'enable_tensorboard': False,
              'enable_checkpoint_and_export': False,
              'enable_time_history': True,
          },
      },
      'evaluation': {
          'epochs_between_evals': epochs_between_evals,
          'skip_eval': skip_eval,
      },
  }
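
# Sketch of how the mapping above is consumed (this mirrors the
# _run_and_report_benchmark methods below rather than introducing a new API):
#   parameters = _get_classifier_parameters(num_gpus=8, dtype='float16')
#   FLAGS.params_override = json.dumps(parameters)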


class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for ResNet50 in Keras."""

  def __init__(self,
               output_dir: Optional[str] = None,
               root_data_dir: Optional[str] = None,
               **kwargs):
    """A benchmark class.

    Args:
      output_dir: directory where outputs (e.g. log files) are written
      root_data_dir: directory under which to look for the dataset
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """

    flag_methods = [classifier_trainer.define_classifier_flags]

    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(Resnet50KerasAccuracy, self).__init__(
        output_dir=output_dir, flag_methods=flag_methods)

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(
      self,
      experiment_name: str,
      top_1_min: float = MIN_TOP_1_ACCURACY,
      top_1_max: float = MAX_TOP_1_ACCURACY,
      num_gpus: int = 0,
      distribution_strategy: str = 'mirrored',
      per_replica_batch_size: int = 128,
      epochs: int = 90,
      steps: int = 0,
      epochs_between_evals: int = 1,
      dtype: str = 'float32',
      enable_xla: bool = False,
      run_eagerly: bool = False,
      gpu_thread_mode: Optional[str] = None,
      dataset_num_private_threads: Optional[int] = None,
      loss_scale: Optional[str] = None):
    """Runs and reports the benchmark given the provided configuration."""
    FLAGS.model_type = 'resnet'
    FLAGS.dataset = 'imagenet'
    FLAGS.mode = 'train_and_eval'
    FLAGS.data_dir = self.data_dir
    FLAGS.model_dir = self._get_model_dir(experiment_name)
    parameters = _get_classifier_parameters(
        num_gpus=num_gpus,
        distribution_strategy=distribution_strategy,
        per_replica_batch_size=per_replica_batch_size,
        epochs=epochs,
        steps=steps,
        epochs_between_evals=epochs_between_evals,
        dtype=dtype,
        enable_xla=enable_xla,
        run_eagerly=run_eagerly,
        gpu_thread_mode=gpu_thread_mode,
        dataset_num_private_threads=dataset_num_private_threads,
        loss_scale=loss_scale)
    FLAGS.params_override = json.dumps(parameters)
    total_batch_size = num_gpus * per_replica_batch_size

    start_time_sec = time.time()
    stats = classifier_trainer.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50KerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=top_1_min,
        top_1_max=top_1_max,
        total_batch_size=total_batch_size,
        log_steps=100)

  def benchmark_8_gpu(self):
    """Tests Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_8_gpu',
        num_gpus=8,
        per_replica_batch_size=128,
        epochs=90,
        epochs_between_evals=10,
        dtype='float32',
        dataset_num_private_threads=14)

  def benchmark_8_gpu_fp16(self):
    """Tests Keras model with eager, dist_strat, 8 GPUs, and fp16."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_8_gpu_fp16',
        num_gpus=8,
        per_replica_batch_size=256,
        epochs=90,
        epochs_between_evals=10,
        dtype='float16',
        gpu_thread_mode='gpu_private')

  def benchmark_xla_8_gpu_fp16(self):
    """Tests Keras model with XLA, eager, dist_strat, 8 GPUs and fp16."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_8_gpu_fp16',
        num_gpus=8,
        per_replica_batch_size=256,
        epochs=90,
        epochs_between_evals=10,
        dtype='float16',
        enable_xla=True,
        gpu_thread_mode='gpu_private')

  def benchmark_xla_8_gpu_fp16_dynamic(self):
    """Tests Keras model with XLA, eager, dist_strat, 8 GPUs, dynamic fp16."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_8_gpu_fp16_dynamic',
        top_1_min=0.736,
        num_gpus=8,
        per_replica_batch_size=256,
        epochs=90,
        epochs_between_evals=10,
        dtype='float16',
        loss_scale='dynamic',
        gpu_thread_mode='gpu_private')

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)


class MobilenetV1KerasAccuracy(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for MobilenetV1 in Keras."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    """A benchmark class.

    Args:
      output_dir: directory where outputs (e.g. log files) are written
      root_data_dir: directory under which to look for the dataset
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """

    flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]

    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(MobilenetV1KerasAccuracy, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags={
            'model': 'mobilenet',
            'optimizer': 'mobilenet_default',
            'initial_learning_rate_per_sample': 0.00039,
        })
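
    # Rough arithmetic behind the default above (a sanity check, assuming the
    # optimizer scales the per-sample rate by the global batch size): at the
    # 8-GPU batch size of 128 * 8 = 1024 used in benchmark_8_gpu, 0.00039 per
    # sample corresponds to an initial learning rate of ~0.4.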

  def benchmark_8_gpu(self):
    """Test Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    # Add some thread tunings to improve performance.
    FLAGS.datasets_num_private_threads = 14
    self._run_and_report_benchmark()

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self,
                                top_1_min=MOBILENET_V1_MIN_TOP_1_ACCURACY,
                                top_1_max=MOBILENET_V1_MAX_TOP_1_ACCURACY):
    start_time_sec = time.time()
    stats = resnet_imagenet_main.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(MobilenetV1KerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=top_1_min,
        top_1_max=top_1_max,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)


class Resnet50KerasClassifierBenchmarkBase(keras_benchmark.KerasBenchmark):
  """Resnet50 (classifier_trainer) benchmarks."""

  def __init__(self, output_dir=None, default_flags=None,
               tpu=None, dataset_builder='records', train_epochs=1,
               train_steps=110, data_dir=None):
    flag_methods = [classifier_trainer.define_classifier_flags]

    self.dataset_builder = dataset_builder
    self.train_epochs = train_epochs
    self.train_steps = train_steps
    self.data_dir = data_dir

    super(Resnet50KerasClassifierBenchmarkBase, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=default_flags,
        tpu=tpu)

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(
      self,
      experiment_name: str,
      skip_steps: Optional[int] = None,
      steps: Optional[int] = None,
      top_1_min: float = MIN_TOP_1_ACCURACY,
      top_1_max: float = MAX_TOP_1_ACCURACY,
      num_gpus: int = 0,
      distribution_strategy: str = 'mirrored',
      per_replica_batch_size: int = 128,
      epochs_between_evals: int = 1,
      dtype: str = 'float32',
      enable_xla: bool = False,
      run_eagerly: bool = False,
      gpu_thread_mode: Optional[str] = None,
      dataset_num_private_threads: Optional[int] = None,
      loss_scale: Optional[str] = None):
    """Runs and reports the benchmark given the provided configuration."""
    FLAGS.model_type = 'resnet'
    FLAGS.dataset = 'imagenet'
    FLAGS.mode = 'train_and_eval'
    FLAGS.data_dir = self.data_dir
    FLAGS.model_dir = self._get_model_dir(experiment_name)
    parameters = _get_classifier_parameters(
        builder=self.dataset_builder,
        skip_eval=True,
        num_gpus=num_gpus,
        distribution_strategy=distribution_strategy,
        per_replica_batch_size=per_replica_batch_size,
        epochs=self.train_epochs,
        steps=steps or self.train_steps,
        epochs_between_evals=epochs_between_evals,
        dtype=dtype,
        enable_xla=enable_xla,
        run_eagerly=run_eagerly,
        gpu_thread_mode=gpu_thread_mode,
        dataset_num_private_threads=dataset_num_private_threads,
        loss_scale=loss_scale)
    FLAGS.params_override = json.dumps(parameters)
    total_batch_size = num_gpus * per_replica_batch_size

    start_time_sec = time.time()
    stats = classifier_trainer.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec
    # Number of logged step-time entries excluded from the performance
    # report. We keep results from the last 100 batches, or skip steps based
    # on the given skip_steps.
    train_steps = steps or self.train_steps
    warmup = (skip_steps or (train_steps - 100)) // FLAGS.log_steps
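    # Worked example with the defaults used by the synthetic/real subclasses
    # below (train_steps=110, log_steps=10): warmup = (110 - 100) // 10 = 1,
    # so one logged entry is skipped and the last ~100 steps are measured.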

    super(Resnet50KerasClassifierBenchmarkBase, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=total_batch_size,
        log_steps=FLAGS.log_steps,
        warmup=warmup,
        start_time_sec=start_time_sec)

  def benchmark_1_gpu_no_dist_strat(self):
    """Tests Keras model with 1 GPU, no distribution strategy."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_1_gpu_no_dist_strat',
        num_gpus=1,
        distribution_strategy='off',
        per_replica_batch_size=128)

  def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
    """Tests Keras model with 1 GPU, no distribution strategy, run eagerly."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_1_gpu_no_dist_strat_run_eagerly',
        num_gpus=1,
        run_eagerly=True,
        distribution_strategy='off',
        per_replica_batch_size=64)

  def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
    """Tests with 1 GPU, no distribution strategy, fp16, run eagerly."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_1_gpu_no_dist_strat_run_eagerly_fp16',
        num_gpus=1,
        run_eagerly=True,
        distribution_strategy='off',
        dtype='float16',
        per_replica_batch_size=128)

  def benchmark_1_gpu(self):
    """Tests Keras model with 1 GPU."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_1_gpu',
        num_gpus=1,
        distribution_strategy='one_device',
        per_replica_batch_size=128)

  def benchmark_xla_1_gpu(self):
    """Tests Keras model with XLA and 1 GPU."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_1_gpu',
        num_gpus=1,
        enable_xla=True,
        distribution_strategy='one_device',
        per_replica_batch_size=128)

  def benchmark_1_gpu_fp16(self):
    """Tests Keras model with 1 GPU and fp16."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_1_gpu_fp16',
        num_gpus=1,
        distribution_strategy='one_device',
        dtype='float16',
        per_replica_batch_size=256)

  def benchmark_1_gpu_fp16_dynamic(self):
    """Tests Keras model with 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_1_gpu_fp16_dynamic',
        num_gpus=1,
        distribution_strategy='one_device',
        dtype='float16',
        per_replica_batch_size=256,
        loss_scale='dynamic')

  def benchmark_xla_1_gpu_fp16(self):
    """Tests Keras model with XLA, 1 GPU and fp16."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_1_gpu_fp16',
        num_gpus=1,
        enable_xla=True,
        distribution_strategy='one_device',
        dtype='float16',
        per_replica_batch_size=256)

  def benchmark_xla_1_gpu_fp16_tweaked(self):
    """Tests Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_1_gpu_fp16_tweaked',
        num_gpus=1,
        enable_xla=True,
        distribution_strategy='one_device',
        dtype='float16',
        per_replica_batch_size=256,
        gpu_thread_mode='gpu_private')

  def benchmark_xla_1_gpu_fp16_dynamic(self):
    """Tests Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_1_gpu_fp16_dynamic',
        num_gpus=1,
        enable_xla=True,
        distribution_strategy='one_device',
        dtype='float16',
        per_replica_batch_size=256,
        loss_scale='dynamic')

  def benchmark_graph_1_gpu(self):
    """Tests Keras model in legacy graph mode with 1 GPU."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_graph_1_gpu',
        num_gpus=1,
        distribution_strategy='one_device',
        per_replica_batch_size=128)

  def benchmark_graph_xla_1_gpu(self):
    """Tests Keras model in legacy graph mode with XLA and 1 GPU."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_graph_xla_1_gpu',
        num_gpus=1,
        enable_xla=True,
        distribution_strategy='one_device',
        per_replica_batch_size=128)

  def benchmark_8_gpu(self):
    """Tests Keras model with 8 GPUs."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_8_gpu',
        num_gpus=8,
        distribution_strategy='mirrored',
        per_replica_batch_size=128)

  def benchmark_8_gpu_tweaked(self):
    """Tests Keras model with manual config tuning and 8 GPUs."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_8_gpu_tweaked',
        num_gpus=8,
        distribution_strategy='mirrored',
        per_replica_batch_size=128,
        dataset_num_private_threads=14)

  def benchmark_xla_8_gpu(self):
    """Tests Keras model with XLA and 8 GPUs."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_8_gpu',
        num_gpus=8,
        enable_xla=True,
        distribution_strategy='mirrored',
        per_replica_batch_size=128)

  def benchmark_xla_8_gpu_tweaked(self):
    """Tests Keras model with manual config tuning, 8 GPUs, and XLA."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_8_gpu_tweaked',
        num_gpus=8,
        enable_xla=True,
        distribution_strategy='mirrored',
        per_replica_batch_size=128,
        gpu_thread_mode='gpu_private',
        dataset_num_private_threads=24)

  def benchmark_8_gpu_fp16(self):
    """Tests Keras model with 8 GPUs and fp16."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_8_gpu_fp16',
        num_gpus=8,
        dtype='float16',
        distribution_strategy='mirrored',
        per_replica_batch_size=256)

  def benchmark_8_gpu_fp16_tweaked(self):
    """Tests Keras model with 8 GPUs, fp16, and manual config tuning."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_8_gpu_fp16_tweaked',
        num_gpus=8,
        dtype='float16',
        distribution_strategy='mirrored',
        per_replica_batch_size=256,
        gpu_thread_mode='gpu_private')

  def benchmark_8_gpu_fp16_dynamic_tweaked(self):
    """Tests Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_8_gpu_fp16_dynamic_tweaked',
        num_gpus=8,
        dtype='float16',
        distribution_strategy='mirrored',
        per_replica_batch_size=256,
        loss_scale='dynamic',
        gpu_thread_mode='gpu_private')

  def benchmark_xla_8_gpu_fp16(self):
    """Tests Keras model with XLA, 8 GPUs and fp16."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_8_gpu_fp16',
        dtype='float16',
        num_gpus=8,
        enable_xla=True,
        distribution_strategy='mirrored',
        per_replica_batch_size=256)

  def benchmark_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_8_gpu_fp16_tweaked',
        dtype='float16',
        num_gpus=8,
        enable_xla=True,
        distribution_strategy='mirrored',
        per_replica_batch_size=256,
        gpu_thread_mode='gpu_private',
        dataset_num_private_threads=48)

  def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Tests with manual config tuning, XLA, 8 GPUs and fp16.

    Delay performance measurement for stable performance on 96 vCPU platforms.
    """
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_8_gpu_fp16_tweaked_delay_measure',
        dtype='float16',
        num_gpus=8,
        enable_xla=True,
        distribution_strategy='mirrored',
        per_replica_batch_size=256,
        gpu_thread_mode='gpu_private',
        steps=310)

  def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Tests Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_xla_8_gpu_fp16_dynamic_tweaked',
        dtype='float16',
        num_gpus=8,
        enable_xla=True,
        distribution_strategy='mirrored',
        per_replica_batch_size=256,
        gpu_thread_mode='gpu_private',
        loss_scale='dynamic',
        dataset_num_private_threads=48)

  def benchmark_graph_8_gpu(self):
    """Tests Keras model in legacy graph mode with 8 GPUs."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_graph_8_gpu',
        num_gpus=8,
        distribution_strategy='mirrored',
        per_replica_batch_size=128)

  def benchmark_graph_xla_8_gpu(self):
    """Tests Keras model in legacy graph mode with XLA and 8 GPUs."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_graph_xla_8_gpu',
        num_gpus=8,
        enable_xla=True,
        distribution_strategy='mirrored',
        per_replica_batch_size=128)

  def benchmark_2x2_tpu_fp16(self):
    """Test Keras model with 2x2 TPU, fp16."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_2x2_tpu_fp16',
        dtype='bfloat16',
        distribution_strategy='tpu',
        per_replica_batch_size=128)

  def benchmark_4x4_tpu_fp16(self):
    """Test Keras model with 4x4 TPU, fp16."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_4x4_tpu_fp16',
        dtype='bfloat16',
        distribution_strategy='tpu',
        per_replica_batch_size=128)

  def fill_report_object(self, stats):
    super(Resnet50KerasClassifierBenchmarkBase, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
  """Resnet50 benchmarks."""

  def __init__(self, output_dir=None, default_flags=None, tpu=None):
    flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]

    super(Resnet50KerasBenchmarkBase, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=default_flags,
        tpu=tpu)

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self, skip_steps=None):
    start_time_sec = time.time()
    stats = resnet_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec
    # Number of logged step-time entries excluded from the performance
    # report. We keep results from the last 100 batches, or skip steps based
    # on the given skip_steps.
    warmup = (skip_steps or (FLAGS.train_steps - 100)) // FLAGS.log_steps

    super(Resnet50KerasBenchmarkBase, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps,
        warmup=warmup,
        start_time_sec=start_time_sec)

  def benchmark_1_gpu_no_dist_strat(self):
    """Test Keras model with 1 GPU, no distribution strategy."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
    """Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly')
    FLAGS.batch_size = 64
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked(self):
    """Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.explicit_gpu_placement = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked')
    FLAGS.batch_size = 64
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
    """Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked(self):
    """Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.explicit_gpu_placement = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_no_dist_strat(self):
    """Test Keras model in legacy graph mode with 1 GPU, no dist strat."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
    FLAGS.batch_size = 96  # BatchNorm is less efficient in legacy graph mode
    # due to its reliance on v1 cond.
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test Keras model with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_amp(self):
    """Test Keras model with 1 GPU with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_amp')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu(self):
    """Test Keras model with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_amp(self):
    """Test Keras model with XLA and 1 GPU with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_amp')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16(self):
    """Test Keras model with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16_dynamic(self):
    """Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16(self):
    """Test Keras model with XLA, 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test Keras model in legacy graph mode with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16 and XLA."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph with 1 GPU, fp16, XLA, and tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_amp(self):
    """Test Keras model with 8 GPUs with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.datasets_num_private_threads = 14
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu(self):
    """Test Keras model with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_amp(self):
    """Test Keras model with XLA and 8 GPUs with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_amp')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning, 8 GPUs, and XLA."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 24
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Test with manual config tuning, XLA, 8 GPUs and fp16.

    Delay performance measurement for stable performance on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.train_steps = 310
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test Keras model in legacy graph mode with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode, tuning, 8 GPUs, and FP16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph tuning, XLA_FP16, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.

    Delay performance measurement for stable performance on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.train_steps = 310
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_2x2_tpu_fp16(self):
    """Test Keras model with 2x2 TPU, fp16."""
    self._setup()

    FLAGS.dtype = 'bf16'
    FLAGS.distribution_strategy = 'tpu'
    FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu_fp16')
    FLAGS.batch_size = 1024
    self._run_and_report_benchmark()

  def benchmark_4x4_tpu_fp16(self):
    """Test Keras model with 4x4 TPU, fp16."""
    self._setup()

    FLAGS.dtype = 'bf16'
    FLAGS.distribution_strategy = 'tpu'
    FLAGS.model_dir = self._get_model_dir('benchmark_4x4_tpu_fp16')
    FLAGS.batch_size = 4096
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(Resnet50KerasBenchmarkBase, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


class Resnet50KerasBenchmarkSynth(Resnet50KerasClassifierBenchmarkBase):
  """Resnet50 synthetic benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, tpu=None, **kwargs):
    def_flags = {}
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkSynth, self).__init__(
        output_dir=output_dir, default_flags=def_flags, tpu=tpu,
        dataset_builder='synthetic', train_epochs=1, train_steps=110)


class Resnet50KerasBenchmarkReal(Resnet50KerasClassifierBenchmarkBase):
  """Resnet50 real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, tpu=None, **kwargs):
    data_dir = os.path.join(root_data_dir, 'imagenet')
    def_flags = {}
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkReal, self).__init__(
        output_dir=output_dir, default_flags=def_flags, tpu=tpu,
        dataset_builder='records', train_epochs=1, train_steps=110,
        data_dir=data_dir)


class Resnet50KerasBenchmarkRemoteData(Resnet50KerasBenchmarkBase):
  """Resnet50 real data (stored in remote storage) benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    # Defining multiple epochs overrides the train_steps setting in benchmarks.
    def_flags['train_epochs'] = 2
    # Cache dataset so performance is stable after the first epoch.
    def_flags['training_dataset_cache'] = True
    def_flags['log_steps'] = 100

    # Note that single-GPU and pure eager tests, which are less likely to be
    # input bound and are more stable, run for a shorter time by overriding
    # FLAGS.train_epochs, train_steps, and log_steps in the benchmark methods,
    # and skip_steps in _run_and_report_benchmark().

    super(Resnet50KerasBenchmarkRemoteData, self).__init__(
        output_dir=output_dir, default_flags=def_flags)

  def _override_flags_to_run_test_shorter(self):
    FLAGS.train_epochs = 1
    FLAGS.train_steps = 300
    FLAGS.log_steps = 10

  def benchmark_1_gpu_no_dist_strat(self):
    """Test Keras model with 1 GPU, no distribution strategy."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
    FLAGS.batch_size = 128
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
    """Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly')
    FLAGS.batch_size = 64
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked(self):
    """Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.explicit_gpu_placement = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked')
    FLAGS.batch_size = 64
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
    """Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 128
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked(self):
    """Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.explicit_gpu_placement = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 128
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_no_dist_strat(self):
    """Test Keras model in legacy graph mode with 1 GPU, no dist strat."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
    # BatchNorm is less efficient in legacy graph mode due to its reliance on
    # v1 cond, so use a smaller batch size.
    FLAGS.batch_size = 96
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test Keras model with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 128
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_1_gpu_amp(self):
    """Test Keras model with 1 GPU with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_amp')
    FLAGS.batch_size = 256
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu(self):
    """Test Keras model with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
    FLAGS.batch_size = 128
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_amp(self):
    """Test Keras model with XLA and 1 GPU with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_amp')
    FLAGS.batch_size = 256
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16(self):
    """Test Keras model with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16_dynamic(self):
    """Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16(self):
    """Test Keras model with XLA, 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test Keras model in legacy graph mode with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 128
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu')
    FLAGS.batch_size = 128
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16 and XLA."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph with 1 GPU, fp16, XLA, and tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._override_flags_to_run_test_shorter()
    self._run_and_report_benchmark()

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self):
    if FLAGS.num_gpus == 1 or FLAGS.run_eagerly:
      # Single-GPU and pure eager tests are less likely to be input bound and
      # are more stable, so run them for a shorter time and use the default
      # skip_steps.
      skip_steps = None
    else:
      # skip the first epoch for performance measurement.
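      # At the common 8-GPU global batch of 2048, one ImageNet epoch
      # (~1.28M images) is ~625 steps, so 600 steps roughly covers the
      # cache-filling first epoch.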
      skip_steps = 600
    super(Resnet50KerasBenchmarkRemoteData,
          self)._run_and_report_benchmark(skip_steps=skip_steps)


class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
  """Trivial model with real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]

    def_flags = {}
    def_flags['use_trivial_model'] = True
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['dtype'] = 'fp16'
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 600
    def_flags['log_steps'] = 100
    def_flags['distribution_strategy'] = 'mirrored'

    super(TrivialKerasBenchmarkReal, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=def_flags)

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = resnet_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(TrivialKerasBenchmarkReal, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_8_gpu_warmup(self):
    """Dummy test that runs over an epoch to warmup the machine."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_warmup')
    FLAGS.batch_size = 256 * 8
    FLAGS.train_steps = 700
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(TrivialKerasBenchmarkReal, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


class Resnet50MultiWorkerKerasAccuracy(keras_benchmark.KerasBenchmark):
  """Resnet50 distributed accuracy tests with multiple workers."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    flag_methods = [classifier_trainer.define_imagenet_keras_flags]
    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(Resnet50MultiWorkerKerasAccuracy, self).__init__(
        output_dir=output_dir, flag_methods=flag_methods)

  def _benchmark_common(self, eager, num_workers, all_reduce_alg):
    """Common to all benchmarks in this class."""
    self._setup()

    num_gpus = 8
    FLAGS.num_gpus = num_gpus
    FLAGS.data_dir = self.data_dir
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = eager
    FLAGS.enable_xla = False
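    # MultiWorkerMirroredStrategy discovers its peers from the TF_CONFIG
    # environment variable, which the benchmark harness is expected to set.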
    FLAGS.distribution_strategy = 'multi_worker_mirrored'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
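    # Give tf.data a private host thread pool so input processing does not
    # compete with the threads reserved by gpu_private mode.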
    FLAGS.datasets_num_private_threads = 32
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_{}_8_gpu_{}_worker_fp16_{}_tweaked'.format(
            'eager' if eager else 'graph', num_workers, all_reduce_alg))
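    # The global batch scales linearly with replica count: 256 per GPU *
    # 8 GPUs * num_workers (e.g. 4096 for 2 workers).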
    FLAGS.batch_size = 256 * num_gpus * num_workers
    FLAGS.all_reduce_alg = all_reduce_alg

    self._run_and_report_benchmark()

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self,
                                top_1_min=MIN_TOP_1_ACCURACY,
                                top_1_max=MAX_TOP_1_ACCURACY):
    start_time_sec = time.time()
    stats = classifier_trainer.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50MultiWorkerKerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=top_1_min,
        top_1_max=top_1_max,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)

  def benchmark_eager_8_gpu_2_workers_fp16_ring_tweaked(self):
    """Eager, 8 GPUs per worker, 2 workers, fp16, ring all-reduce."""
    self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='ring')

  def benchmark_eager_8_gpu_2_workers_fp16_nccl_tweaked(self):
    """Eager, 8 GPUs per worker, 2 workers, fp16, nccl all-reduce."""
    self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='nccl')

  def benchmark_eager_8_gpu_8_workers_fp16_ring_tweaked(self):
    """Eager, 8 GPUs per worker, 8 workers, fp16, ring all-reduce."""
    self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='ring')

  def benchmark_eager_8_gpu_8_workers_fp16_nccl_tweaked(self):
    """Eager, 8 GPUs per worker, 8 workers, fp16, nccl all-reduce."""
    self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='nccl')


class Resnet50MultiWorkerKerasBenchmark(Resnet50KerasBenchmarkBase):
  """Resnet50 distributed benchmark tests with multiple workers."""

  def __init__(self, output_dir=None, default_flags=None):
    super(Resnet50MultiWorkerKerasBenchmark, self).__init__(
        output_dir=output_dir, default_flags=default_flags)

  def _benchmark_common(self, eager, num_workers, all_reduce_alg):
    """Common to all benchmarks in this class."""
    self._setup()

    num_gpus = 8
    FLAGS.num_gpus = num_gpus
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = eager
    FLAGS.enable_xla = False
    FLAGS.distribution_strategy = 'multi_worker_mirrored'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 32
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_{}_8_gpu_{}_worker_fp16_{}_tweaked'.format(
            'eager' if eager else 'graph', num_workers, all_reduce_alg))
    FLAGS.batch_size = 256 * num_gpus * num_workers
    FLAGS.all_reduce_alg = all_reduce_alg

    self._run_and_report_benchmark()

  def benchmark_eager_8_gpu_1_worker_fp16_ring_tweaked(self):
    """Eager, 8 GPUs per worker, 1 worker, fp16, ring all-reduce."""
    self._benchmark_common(eager=True, num_workers=1, all_reduce_alg='ring')

  def benchmark_eager_8_gpu_1_worker_fp16_nccl_tweaked(self):
    """Eager, 8 GPUs per worker, 1 worker, fp16, nccl all-reduce."""
    self._benchmark_common(eager=True, num_workers=1, all_reduce_alg='nccl')

  def benchmark_eager_8_gpu_2_workers_fp16_ring_tweaked(self):
    """Eager, 8 GPUs per worker, 2 workers, fp16, ring all-reduce."""
    self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='ring')

  def benchmark_eager_8_gpu_2_workers_fp16_nccl_tweaked(self):
    """Eager, 8 GPUs per worker, 2 workers, fp16, nccl all-reduce."""
    self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='nccl')

  def benchmark_eager_8_gpu_8_workers_fp16_ring_tweaked(self):
    """Eager, 8 GPUs per worker, 8 workers, fp16, ring all-reduce."""
    self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='ring')

  def benchmark_eager_8_gpu_8_workers_fp16_nccl_tweaked(self):
    """Eager, 8 GPUs per worker, 8 workers, fp16, nccl all-reduce."""
    self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='nccl')


class Resnet50MultiWorkerKerasBenchmarkSynth(Resnet50MultiWorkerKerasBenchmark):
  """Resnet50 multi-worker synthetic data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['use_synthetic_data'] = True
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50MultiWorkerKerasBenchmarkSynth, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class Resnet50MultiWorkerKerasBenchmarkReal(Resnet50MultiWorkerKerasBenchmark):
  """Resnet50 multi-worker real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50MultiWorkerKerasBenchmarkReal, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


# TODO(kimjaehong): This should also cover other model optimization
# techniques. At that point, this class will be renamed to something like
# 'KerasModelOptimizationAccuracyBase'.
class KerasPruningAccuracyBase(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for pruning method."""

  def __init__(self,
               output_dir=None,
               root_data_dir=None,
               default_flags=None,
               **kwargs):
    """A accuracy benchmark class for pruning method.

    Args:
      output_dir: directory where to output e.g. log files
      root_data_dir: directory under which to look for dataset
      default_flags: default flags
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """
    if default_flags is None:
      default_flags = {}
    default_flags['pruning_method'] = 'polynomial_decay'
    default_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')

    flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]

    super(KerasPruningAccuracyBase, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=default_flags,
        **kwargs)

  def benchmark_8_gpu(self):
    """Test Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.batch_size = 32 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    self._run_and_report_benchmark()

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self,
                                top_1_min=MODEL_OPTIMIZATION_TOP_1_ACCURACY[
                                    'RESNET50_FINETUNE_PRUNING'][0],
                                top_1_max=MODEL_OPTIMIZATION_TOP_1_ACCURACY[
                                    'RESNET50_FINETUNE_PRUNING'][1]):
    start_time_sec = time.time()
    stats = resnet_imagenet_main.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(KerasPruningAccuracyBase, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=top_1_min,
        top_1_max=top_1_max,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)
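
# For reference: a 'polynomial_decay' pruning schedule ramps sparsity from
# pruning_initial_sparsity to pruning_final_sparsity between
# pruning_begin_step and pruning_end_step, re-evaluated every
# pruning_frequency steps. A minimal sketch of the curve, assuming tfmot's
# PolynomialDecay with its default exponent of 3 and the resnet50 settings
# used below (illustrative only; not called by these benchmarks):
#
#   def sparsity_at_step(step, begin=0, end=50000, s0=0.0, s1=0.5, power=3):
#     frac = min(max((step - begin) / float(end - begin), 0.0), 1.0)
#     return s1 + (s0 - s1) * (1.0 - frac)**power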


class MobilenetV1KerasPruningAccuracy(KerasPruningAccuracyBase):
  """Benchmark accuracy tests for MobilenetV1 with pruning method."""

  def __init__(self, root_data_dir=None, **kwargs):
    default_flags = {
        'model': 'mobilenet',
        'optimizer': 'mobilenet_default',
        'initial_learning_rate_per_sample': 0.00007,
        'pretrained_filepath': tf.train.latest_checkpoint(
            os.path.join(root_data_dir, 'mobilenet_v1')),
        'pruning_begin_step': 0,
        'pruning_end_step': 100000,
        'pruning_initial_sparsity': 0.0,
        'pruning_final_sparsity': 0.5,
        'pruning_frequency': 100,
    }
    super(MobilenetV1KerasPruningAccuracy, self).__init__(
        root_data_dir=root_data_dir,
        default_flags=default_flags,
        **kwargs)

  def _run_and_report_benchmark(self):
    super(MobilenetV1KerasPruningAccuracy, self)._run_and_report_benchmark(
        top_1_min=MODEL_OPTIMIZATION_TOP_1_ACCURACY[
            'MOBILENET_V1_FINETUNE_PRUNING'][0],
        top_1_max=MODEL_OPTIMIZATION_TOP_1_ACCURACY[
            'MOBILENET_V1_FINETUNE_PRUNING'][1])


class Resnet50KerasPruningAccuracy(KerasPruningAccuracyBase):
  """Benchmark accuracy tests for resnet50 with pruning method."""

  def __init__(self, root_data_dir=None, **kwargs):
    default_flags = {
        'model': 'resnet50_v1.5',
        'optimizer': 'mobilenet_default',
        'initial_learning_rate_per_sample': 0.0000039,
        'pretrained_filepath': tf.train.latest_checkpoint(
            os.path.join(root_data_dir, 'resnet50')),
        'pruning_begin_step': 0,
        'pruning_end_step': 50000,
        'pruning_initial_sparsity': 0.0,
        'pruning_final_sparsity': 0.5,
        'pruning_frequency': 100,
    }
    super(Resnet50KerasPruningAccuracy, self).__init__(
        root_data_dir=root_data_dir,
        default_flags=default_flags,
        **kwargs)

  def _run_and_report_benchmark(self):
    super(Resnet50KerasPruningAccuracy, self)._run_and_report_benchmark(
        top_1_min=MODEL_OPTIMIZATION_TOP_1_ACCURACY[
            'RESNET50_FINETUNE_PRUNING'][0],
        top_1_max=MODEL_OPTIMIZATION_TOP_1_ACCURACY[
            'RESNET50_FINETUNE_PRUNING'][1])


class KerasPruningBenchmarkRealBase(Resnet50KerasBenchmarkBase):
  """Pruning method benchmarks."""

  def __init__(self, root_data_dir=None, default_flags=None, **kwargs):
    if default_flags is None:
      default_flags = {}
    default_flags.update({
        'skip_eval': True,
        'report_accuracy_metrics': False,
        'data_dir': os.path.join(root_data_dir, 'imagenet'),
        'train_steps': 110,
        'log_steps': 10,
        'pruning_method': 'polynomial_decay',
        'pruning_begin_step': 0,
        'pruning_end_step': 50000,
        'pruning_initial_sparsity': 0,
        'pruning_final_sparsity': 0.5,
        'pruning_frequency': 100,
    })
    super(KerasPruningBenchmarkRealBase, self).__init__(
        default_flags=default_flags, **kwargs)


class MobilenetV1KerasPruningBenchmarkReal(KerasPruningBenchmarkRealBase):
  """Pruning method benchmarks for MobilenetV1."""

  def __init__(self, **kwargs):
    default_flags = {
        'model': 'mobilenet',
        'optimizer': 'mobilenet_default',
    }
    super(MobilenetV1KerasPruningBenchmarkReal, self).__init__(
        default_flags=default_flags, **kwargs)


class Resnet50KerasPruningBenchmarkReal(KerasPruningBenchmarkRealBase):
  """Pruning method benchmarks for resnet50."""

  def __init__(self, **kwargs):
    default_flags = {
        'model': 'resnet50_v1.5',
        'optimizer': 'mobilenet_default',
    }
    super(Resnet50KerasPruningBenchmarkReal, self).__init__(
        default_flags=default_flags, **kwargs)


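# Each public benchmark_* method above is a separately runnable benchmark.
# When this file is run directly, tf.test.main() dispatches to them through
# TensorFlow's --benchmarks regex flag, e.g. (invocation illustrative):
#
#   python3 keras_imagenet_benchmark.py \
#       --benchmarks=Resnet50KerasBenchmarkSynth.benchmark_1_gpu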
if __name__ == '__main__':
  tf.test.main()