# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Keras benchmarks and accuracy tests."""
from __future__ import print_function

import os
import time

from absl import flags
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.resnet import imagenet_main
from official.resnet.keras import keras_benchmark
from official.resnet.keras import keras_common
from official.resnet.keras import keras_imagenet_main

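# Expected top-1 accuracy band for a converged ResNet50 on ImageNet. These
# bounds are passed to _report_benchmark as top_1_min/top_1_max below, which
# presumably flags runs that fall outside the band.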
MIN_TOP_1_ACCURACY = 0.76
MAX_TOP_1_ACCURACY = 0.77

FLAGS = flags.FLAGS


class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for ResNet50 in Keras."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    """A benchmark class.

    Args:
      output_dir: directory where to output e.g. log files
      root_data_dir: directory under which to look for dataset
44
45
46
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
47
48
    """

    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]

    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(Resnet50KerasAccuracy, self).__init__(
        output_dir=output_dir, flag_methods=flag_methods)

  def benchmark_graph_8_gpu(self):
    """Test Keras model with Keras fit/dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    # Add some thread tunings to improve performance.
    FLAGS.datasets_num_private_threads = 14
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with eager, dist_strat, 8 GPUs, and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs, dynamic fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.loss_scale = 'dynamic'
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50KerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=MIN_TOP_1_ACCURACY,
        top_1_max=MAX_TOP_1_ACCURACY,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)


class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
  """Resnet50 benchmarks."""

  def __init__(self, output_dir=None, default_flags=None):
    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]

    super(Resnet50KerasBenchmarkBase, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=default_flags)

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50KerasBenchmarkBase, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_1_gpu_no_dist_strat(self):
    """Test Keras model with 1 GPU, no distribution strategy."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_no_dist_strat(self):
    """Test Keras model in legacy graph mode with 1 GPU, no dist strat."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
    FLAGS.batch_size = 96  # BatchNorm is less efficient in legacy graph mode
                           # due to its reliance on v1 cond.
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test Keras model with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu(self):
    """Test Keras model with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16(self):
    """Test Keras model with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16_dynamic(self):
    """Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16(self):
    """Test Keras model with XLA, 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_slack(self):
    """Test Keras model with XLA, 1 GPU, fp16, and tf.data's experimental_slack
       functionality."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_slack')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test Keras model in legacy graph mode with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16 and XLA."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16, XLA, and manual
       config tuning.
    """
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_slack(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16, XLA, and
       tf.data's experimental_slack functionality.
    """
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_slack')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_cloning(self):
    """Test Keras model with 8 GPUs and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_cloning')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.datasets_num_private_threads = 14
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_slack(self):
    """Test Keras model with tf.data's experimental_slack and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_slack')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu(self):
    """Test Keras model with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning, 8 GPUs, and XLA."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 24
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_cloning(self):
    """Test Keras model with 8 GPUs, fp16 and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_cloning')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, dynamic loss scaling, and manual
       config tuning.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_cloning(self):
    """Test Keras model with XLA, 8 GPUs, fp16 and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_cloning')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    # FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked_optional_next(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs, fp16.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tweaked_optional_next')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    # FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_slack(self):
    """Test Keras model with XLA, 8 GPUs and fp16.

    This test also enables tf.data's experimental_slack functionality.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_slack')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tensorboard_tweaked(self):
    """Test to track Tensorboard performance overhead."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tensorboard_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    FLAGS.enable_tensorboard = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test Keras model in legacy graph mode with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with manual config tuning, 8 GPUs
       and fp16.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with manual config tuning, XLA,
       8 GPUs and fp16.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_slack(self):
    """Test Keras model in legacy graph mode with tf.data's experimental_slack
       functionality, XLA, 8 GPUs and fp16.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_slack')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(Resnet50KerasBenchmarkBase, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


class Resnet50KerasBenchmarkSynth(Resnet50KerasBenchmarkBase):
  """Resnet50 synthetic benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['use_synthetic_data'] = True
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkSynth, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class Resnet50KerasBenchmarkReal(Resnet50KerasBenchmarkBase):
  """Resnet50 real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkReal, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
  """Trivial model with real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['dtype'] = 'fp16'
    def_flags['enable_xla'] = True
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 600
    def_flags['log_steps'] = 100
    def_flags['distribution_strategy'] = 'default'

    super(TrivialKerasBenchmarkReal, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=def_flags)

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(TrivialKerasBenchmarkReal, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_8_gpu_warmup(self):
    """Dummy test that runs over an epoch to warmup the machine."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_warmup')
    FLAGS.batch_size = 256
    FLAGS.train_steps = 700
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test trivial Keras model (input pipeline) with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test trivial Keras model (input pipeline) with manual config tuning and
       8 GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_slack(self):
    """Test trivial Keras model (input pipeline) with tf.data's
       experimental_slack and 8 GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_slack')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test trivial Keras model (input pipeline) in legacy graph mode with 8
       GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_tweaked(self):
    """Test trivial Keras model (input pipeline) in legacy graph mode with
       manual config tuning and 8 GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(TrivialKerasBenchmarkReal, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


if __name__ == '__main__':
  tf.test.main()