# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Keras benchmarks and accuracy tests."""
from __future__ import print_function

import os
import time

from absl import flags
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.resnet import imagenet_main
from official.resnet.keras import keras_benchmark
from official.resnet.keras import keras_common
from official.resnet.keras import keras_imagenet_main

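# Top-1 accuracy bounds for a full 90-epoch ResNet50 ImageNet run; passed to
# _report_benchmark below as top_1_min/top_1_max.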
MIN_TOP_1_ACCURACY = 0.76
MAX_TOP_1_ACCURACY = 0.77

FLAGS = flags.FLAGS


class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for ResNet50 in Keras."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    """A benchmark class.

    Args:
      output_dir: directory where to output e.g. log files
      root_data_dir: directory under which to look for dataset
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """

    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]

    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(Resnet50KerasAccuracy, self).__init__(
        output_dir=output_dir, flag_methods=flag_methods)

  def benchmark_graph_8_gpu(self):
    """Test Keras model with Keras fit/dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    # Add some thread tunings to improve performance.
    FLAGS.datasets_num_private_threads = 14
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with eager, dist_strat, 8 GPUs, and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    # Thread tuning to improve performance.
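    # 'gpu_private' gives each GPU its own dedicated threads for launching
    # ops, rather than sharing a global pool.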
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_mlperf_like_tweaked(self):
    """Test similar to the rules for MLPerf 0.5.

    Listed below are reasons this comparison does not follow the MLPerf spec
    exactly, but it is still a decent directional measurement:
      - Eval runs every 4 epochs and again at the end, i.e. ~2 extra times.
      - The learning rate is not tuned to hit 75%, but we know the model is
        correct.
      - We measure total time, while MLPerf 0.5 excluded some startup time.
      - Eval does not cover the full 50K-image set; eval batch_size must be
        chosen so that 8 * batch_size divides 50K evenly. 250 is a good
        number (8 * 250 = 2,000 and 50,000 / 2,000 = 25 eval steps).
      - We may run slightly more or fewer steps than intended due to epoch
        bleed.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 61
    FLAGS.epochs_between_evals = 4
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mlperf_like_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_8_gpu_mlperf_like(self):
    """Test similar to the rules for MLPerf 0.5.

    Listed below are reasons this comparison does not follow the MLPerf spec
    exactly, but it is still a decent directional measurement:
      - Eval runs every 4 epochs and again at the end, i.e. ~2 extra times.
      - The learning rate is not tuned to hit 75%, but we know the model is
        correct.
      - We measure total time, while MLPerf 0.5 excluded some startup time.
      - Eval does not cover the full 50K-image set; eval batch_size must be
        chosen so that 8 * batch_size divides 50K evenly. 250 is a good
        number (8 * 250 = 2,000 and 50,000 / 2,000 = 25 eval steps).
      - We may run slightly more or fewer steps than intended due to epoch
        bleed.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 61
    FLAGS.epochs_between_evals = 4
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mlperf_like')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs, dynamic fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.loss_scale = 'dynamic'
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50KerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=MIN_TOP_1_ACCURACY,
        top_1_max=MAX_TOP_1_ACCURACY,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)


class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
  """Resnet50 benchmarks."""

  def __init__(self, output_dir=None, default_flags=None):
    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]

    super(Resnet50KerasBenchmarkBase, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=default_flags)

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec
    # Number of logged step-time entries to exclude from the performance
    # report; this keeps only the results from the last 100 batches.
    warmup = (FLAGS.train_steps - 100) // FLAGS.log_steps
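    # E.g. with the train_steps=110 / log_steps=10 defaults set by the
    # subclasses below, warmup = (110 - 100) // 10 = 1, so only the first
    # logged entry is dropped.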

    super(Resnet50KerasBenchmarkBase, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps,
        warmup=warmup)

  def benchmark_1_gpu_no_dist_strat(self):
    """Test Keras model with 1 GPU, no distribution strategy."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
    """Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly')
    FLAGS.batch_size = 64
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
    """Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_no_dist_strat(self):
    """Test Keras model in legacy graph mode with 1 GPU, no dist strat."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
    FLAGS.batch_size = 96  # BatchNorm is less efficient in legacy graph mode
                           # due to its reliance on v1 cond.
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test Keras model with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu(self):
    """Test Keras model with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16(self):
    """Test Keras model with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16_dynamic(self):
    """Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16(self):
    """Test Keras model with XLA, 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_slack(self):
    """Test Keras model with XLA, 1 GPU, fp16, and tf.data's experimental_slack
       functionality."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_slack')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
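    # tf.data's experimental_slack introduces slack in the input pipeline's
    # final prefetch, which can reduce CPU contention at step boundaries.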
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test Keras model in legacy graph mode with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16 and XLA."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16, XLA, and manual
       config tuning.
    """
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_slack(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16, XLA, and
       tf.data's experimental_slack functionality.
    """
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_slack')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_cloning(self):
    """Test Keras model with 8 GPUs and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
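    # Assumed semantics: with cloning enabled, Keras clones the model per
    # replica under the distribution strategy instead of using the
    # experimental no-cloning code path.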
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_cloning')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
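    # Give tf.data a private thread pool so input processing does not compete
    # with training threads.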
    FLAGS.datasets_num_private_threads = 14
    self._run_and_report_benchmark()

  def benchmark_8_gpu_slack(self):
    """Test Keras model with tf.data's experimental_slack and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_slack')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu(self):
    """Test Keras model with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning, 8 GPUs, and XLA."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 24
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_cloning(self):
    """Test Keras model with 8 GPUs, fp16 and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_cloning')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_cloning_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, cloning, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_fp16_cloning_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_optional_next(self):
    """Test Keras model with XLA, 8 GPUs and fp16.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_optional_next')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
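    # Assumed semantics: get_next_as_optional lets the distributed input
    # iterator tolerate a final partial batch.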
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_cloning(self):
    """Test Keras model with XLA, 8 GPUs, fp16 and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_cloning')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_cloning_tweaked(self):
    """Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_cloning_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    # FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_cloning_tweaked_optional_next(self):
    """Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_cloning_tweaked_optional_next')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    # FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Test with manual config tuning, XLA, 8 GPUs and fp16.

    Delay performance measurement for stable performance on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
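    # Train longer (310 steps vs. the usual 110) so the final 100 batches
    # used for reporting start well after startup noise.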
    FLAGS.train_steps = 310
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_cloning_tweaked_delay_measure(self):
    """Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning.

    Delay performance measurement for stable performance on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_cloning_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    FLAGS.train_steps = 310
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked_optional_next(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs, fp16.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tweaked_optional_next')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_slack(self):
    """Test Keras model with XLA, 8 GPUs and fp16.

    This test also enables tf.data's experimental_slack functionality.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_slack')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tensorboard_tweaked(self):
    """Test to track Tensorboard performance overhead."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tensorboard_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    FLAGS.enable_tensorboard = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test Keras model in legacy graph mode with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with manual config tuning, 8 GPUs
       and fp16.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with manual config tuning, XLA,
       8 GPUs and fp16.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.

    Delay performance measurement for stable performance on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.train_steps = 310
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked_optional_next(self):
    """Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked_optional_next')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_slack(self):
    """Test Keras model in legacy graph mode with tf.data's experimental_slack
       functionality, XLA, 8 GPUs and fp16.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_slack')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(Resnet50KerasBenchmarkBase, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


class Resnet50KerasBenchmarkSynth(Resnet50KerasBenchmarkBase):
  """Resnet50 synthetic benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
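    # Synthetic data takes the input pipeline out of the measurement,
    # isolating raw model throughput.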
    def_flags['use_synthetic_data'] = True
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkSynth, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class Resnet50KerasBenchmarkReal(Resnet50KerasBenchmarkBase):
  """Resnet50 real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkReal, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
  """Trivial model with real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]
    def_flags = {}
    def_flags['use_trivial_model'] = True
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['use_tensor_lr'] = True
    def_flags['dtype'] = 'fp16'
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 600
    def_flags['log_steps'] = 100
    def_flags['distribution_strategy'] = 'default'

    super(TrivialKerasBenchmarkReal, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=def_flags)

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(TrivialKerasBenchmarkReal, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_8_gpu_warmup(self):
    """Dummy test that runs over an epoch to warm up the machine."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_warmup')
    FLAGS.batch_size = 256 * 8
    FLAGS.train_steps = 700
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test trivial Keras model (input pipeline) in legacy graph mode with
       1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test trivial Keras model (input pipeline) with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test trivial Keras model (input pipeline) with manual config tuning and
       8 GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test trivial Keras model (input pipeline) in legacy graph mode with 8
       GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_tweaked(self):
    """Test trivial Keras model (input pipeline) in legacy graph mode with
       manual config tuning and 8 GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(TrivialKerasBenchmarkReal, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


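# Benchmark methods are typically selected at runtime with TensorFlow's
# --benchmarks regex flag (assuming the standard tf.test benchmark runner),
# e.g.:
#   python keras_imagenet_benchmark.py \
#     --benchmarks=Resnet50KerasBenchmarkSynth.benchmark_8_gpu_fp16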
if __name__ == '__main__':
  tf.test.main()