# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Keras benchmarks and accuracy tests."""
from __future__ import print_function

import os
import time

from absl import flags
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.resnet import imagenet_main
from official.resnet.keras import keras_benchmark
from official.resnet.keras import keras_common
from official.resnet.keras import keras_imagenet_main

MIN_TOP_1_ACCURACY = 0.76
MAX_TOP_1_ACCURACY = 0.77
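# ResNet-50 trained on ImageNet for 90 epochs is expected to reach roughly
# 76-77% top-1 accuracy; the accuracy tests below pass these bounds to
# _report_benchmark() as the acceptable top-1 window.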

FLAGS = flags.FLAGS


class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for ResNet50 in Keras."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    """A benchmark class.

    Args:
      output_dir: Directory where output such as log files is written.
      root_data_dir: Directory under which to look for the dataset.
      **kwargs: Arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """

    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]
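    # The flag definitions are passed as callables (hence the lambda) so that
    # _setup() can invoke them later, (re)defining and resetting the flags
    # before each test runs.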

    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(Resnet50KerasAccuracy, self).__init__(
        output_dir=output_dir, flag_methods=flag_methods)

  def benchmark_graph_8_gpu(self):
    """Test Keras model with Keras fit/dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.dtype = 'fp32'
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    # Add some thread tunings to improve performance.
    FLAGS.datasets_num_private_threads = 14
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with eager, dist_strat, 8 GPUs, and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs, dynamic fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.loss_scale = 'dynamic'
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50KerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=MIN_TOP_1_ACCURACY,
        top_1_max=MAX_TOP_1_ACCURACY,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)


class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
  """Resnet50 benchmarks."""

  def __init__(self, output_dir=None, default_flags=None):
    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]

    super(Resnet50KerasBenchmarkBase, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=default_flags)

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50KerasBenchmarkBase, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_1_gpu_no_dist_strat(self):
    """Test Keras model with 1 GPU, no distribution strategy."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_no_dist_strat(self):
    """Test Keras model in legacy graph mode with 1 GPU, no dist strat."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
    FLAGS.batch_size = 96  # BatchNorm is less efficient in legacy graph mode
                           # due to its reliance on v1 cond.
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test Keras model with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu(self):
    """Test Keras model with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16(self):
    """Test Keras model with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16_dynamic(self):
    """Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16(self):
    """Test Keras model with XLA, 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_prefetch_with_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test Keras model in legacy graph mode with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16 and XLA."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16, XLA, and manual
       config tuning.
    """
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.datasets_num_private_threads = 14
    FLAGS.data_prefetch_with_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu(self):
    """Test Keras model with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_tweaked(self):
    """Test Keras model with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_prefetch_with_slack = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_prefetch_with_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    # FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_prefetch_with_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_prefetch_with_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tensorboard_tweaked(self):
    """Test to track Tensorboard performance overhead."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tensorboard_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_prefetch_with_slack = True
    FLAGS.enable_tensorboard = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test Keras model in legacy graph mode with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with manual config tuning, XLA,
       8 GPUs and fp16.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(Resnet50KerasBenchmarkBase, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


class Resnet50KerasBenchmarkSynth(Resnet50KerasBenchmarkBase):
  """Resnet50 synthetic benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['use_synthetic_data'] = True
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10
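    # With eval skipped and only 110 training steps (stats logged every 10),
    # these runs measure steady-state throughput rather than convergence.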

    super(Resnet50KerasBenchmarkSynth, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class Resnet50KerasBenchmarkReal(Resnet50KerasBenchmarkBase):
  """Resnet50 real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkReal, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
  """Trivial model with real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['dtype'] = 'fp16'
    def_flags['enable_xla'] = True
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 600
    def_flags['log_steps'] = 100
    def_flags['distribution_strategy'] = 'default'

    super(TrivialKerasBenchmarkReal, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=def_flags)

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(TrivialKerasBenchmarkReal, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_8_gpu_warmup(self):
    """Dummy test that runs over an epoch to warmup the machine."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_warmup')
    FLAGS.batch_size = 256
    FLAGS.train_steps = 700
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test trivial Keras model (input pipeline) with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test trivial Keras model (input pipeline) with manual config tuning and
       8 GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_prefetch_with_slack = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test trivial Keras model (input pipeline) in legacy graph mode with 8
       GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_tweaked(self):
    """Test trivial Keras model (input pipeline) in legacy graph mode with
       manual config tuning and 8 GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(TrivialKerasBenchmarkReal, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


if __name__ == '__main__':
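  # tf.test.main() also hooks up TensorFlow's benchmark runner: passing
  # --benchmarks=<regex> on the command line runs the matching benchmark_*
  # methods instead of unit tests. A sketch of a direct invocation (the
  # method chosen below is only an example):
  #
  #   python keras_imagenet_benchmark.py \
  #       --benchmarks=Resnet50KerasBenchmarkSynth.benchmark_1_gpu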
  tf.test.main()