# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions and classes used by both keras cifar and imagenet."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import multiprocessing
import os
import time

import numpy as np

# pylint: disable=g-bad-import-order
from absl import flags
import tensorflow as tf

from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.keras.optimizer_v2 import (gradient_descent as
                                                  gradient_descent_v2)

FLAGS = flags.FLAGS
BASE_LEARNING_RATE = 0.1  # This matches Jing's version.
TRAIN_TOP_1 = 'training_accuracy_top_1'


class BatchTimestamp(object):
  """A structure to store batch time stamp."""

  def __init__(self, batch_index, timestamp):
    self.batch_index = batch_index
    self.timestamp = timestamp


class TimeHistory(tf.keras.callbacks.Callback):
  """Callback for Keras models."""

  def __init__(self, batch_size, log_steps):
    """Callback for logging performance (# image/second).

    Args:
      batch_size: Total batch size.
      log_steps: Interval of time history logs.

    """
    self.batch_size = batch_size
    super(TimeHistory, self).__init__()
    self.log_steps = log_steps

    # Logs start of step 0 then end of each step based on log_steps interval.
    self.timestamp_log = []

  def on_train_begin(self, logs=None):
    self.record_batch = True

  def on_train_end(self, logs=None):
    self.train_finish_time = time.time()

  def on_batch_begin(self, batch, logs=None):
    if self.record_batch:
      timestamp = time.time()
      self.start_time = timestamp
      self.record_batch = False
      if batch == 0:
        self.timestamp_log.append(BatchTimestamp(batch, timestamp))

  def on_batch_end(self, batch, logs=None):
    if batch % self.log_steps == 0:
      timestamp = time.time()
      elapsed_time = timestamp - self.start_time
      examples_per_second = (self.batch_size * self.log_steps) / elapsed_time
      if batch != 0:
        self.record_batch = True
        self.timestamp_log.append(BatchTimestamp(batch, timestamp))
        tf.compat.v1.logging.info(
            "BenchmarkMetric: {'num_batches':%d, 'time_taken': %f,"
            "'images_per_second': %f}" %
            (batch, elapsed_time, examples_per_second))
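
# Example usage for TimeHistory (a sketch, not part of the original module;
# `model` and `dataset` are assumed to be a compiled tf.keras.Model and a
# tf.data.Dataset):
#
#   time_callback = TimeHistory(batch_size=128, log_steps=100)
#   model.fit(dataset, epochs=1, callbacks=[time_callback])
#   for stamp in time_callback.timestamp_log:
#     print(stamp.batch_index, stamp.timestamp)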


class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
  """Callback to update learning rate on every batch (not epoch boundaries).

  N.B. Only support Keras optimizers, not TF optimizers.

  Args:
      schedule: a function that takes an epoch index, a batch index, the number
          of batches per epoch, and the total batch size as input (the indices
          are integers, indexed from 0) and returns a new learning rate as
          output (float).
  """

  def __init__(self, schedule, batch_size, num_images):
    super(LearningRateBatchScheduler, self).__init__()
    self.schedule = schedule
    self.batches_per_epoch = num_images / batch_size
    self.batch_size = batch_size
    self.epochs = -1
    self.prev_lr = -1

  def on_epoch_begin(self, epoch, logs=None):
    if not hasattr(self.model.optimizer, 'learning_rate'):
      raise ValueError('Optimizer must have a "learning_rate" attribute.')
    self.epochs += 1

  def on_batch_begin(self, batch, logs=None):
    """Executes before step begins."""
    lr = self.schedule(self.epochs,
                       batch,
                       self.batches_per_epoch,
                       self.batch_size)
    if not isinstance(lr, (float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function should be float.')
    if lr != self.prev_lr:
      self.model.optimizer.learning_rate = lr  # lr should be a float here
      self.prev_lr = lr
      tf.compat.v1.logging.debug(
          'Epoch %05d Batch %05d: LearningRateBatchScheduler '
          'changing learning rate to %s.', self.epochs, batch, lr)
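
# Example usage for LearningRateBatchScheduler (a sketch; `decay_schedule` and
# the counts below are illustrative, not part of this module). Note that the
# schedule takes four arguments, matching the call in on_batch_begin:
#
#   def decay_schedule(epoch, batch, batches_per_epoch, batch_size):
#     del batch, batches_per_epoch, batch_size  # Unused by this toy schedule.
#     return BASE_LEARNING_RATE * (0.1 ** (epoch // 30))
#
#   lr_callback = LearningRateBatchScheduler(
#       decay_schedule, batch_size=128, num_images=50000)
#   model.fit(dataset, epochs=90, callbacks=[lr_callback])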


def get_config_proto_v1():
  """Return config proto according to flag settings, or None to use default."""
  config = None
  if FLAGS.enable_xla:
    # TODO(haoyuzhang): Remove this monkey patch when XLA OOM issue is fixed.
    _monkey_patch_org_assert_broadcastable()

    config = tf.compat.v1.ConfigProto()
    config.graph_options.optimizer_options.global_jit_level = (
        tf.OptimizerOptions.ON_2)
    # Disable PinToHostOptimizer in grappler when enabling XLA because it causes
    # OOM and performance regression.
    config.graph_options.rewrite_options.pin_to_host_optimization = (
        rewriter_config_pb2.RewriterConfig.OFF)
  return config
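
# Example usage for get_config_proto_v1 (a sketch for TF 1.x graph mode; the
# returned proto is None unless --enable_xla is set):
#
#   session_config = get_config_proto_v1()
#   with tf.compat.v1.Session(config=session_config) as sess:
#     sess.run(train_op)  # `train_op` is a hypothetical training op.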


def set_config_v2():
  """Config eager context according to flag values using TF 2.0 API."""
  if FLAGS.enable_xla:
    # TODO(haoyuzhang): Remove this monkey patch when XLA OOM issue is fixed.
    _monkey_patch_org_assert_broadcastable()

    tf.config.optimizer.set_jit(True)
    # Disable PinToHostOptimizer in grappler when enabling XLA because it
    # causes OOM and performance regression.
    tf.config.optimizer.set_experimental_options(
        {"pin_to_host_optimization": False}
    )
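
# Example usage for set_config_v2 (a sketch): choose the config path based on
# the running TF version.
#
#   if is_v2_0():
#     set_config_v2()
#   else:
#     session_config = get_config_proto_v1()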


def set_gpu_thread_mode_and_count(flags_obj):
  """Set GPU thread mode and count, and adjust dataset threads count."""
  cpu_count = multiprocessing.cpu_count()
  tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)

  # Allocate private thread pool for each GPU to schedule and launch kernels
  per_gpu_thread_count = flags_obj.per_gpu_thread_count or 2
  os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
  os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
  tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
                            os.environ['TF_GPU_THREAD_COUNT'])
  tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
                            os.environ['TF_GPU_THREAD_MODE'])

  # Limit data preprocessing threadpool to CPU cores minus number of total GPU
  # private threads and memory copy threads.
  total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
  num_mem_copy_threads = flags_obj.num_gpus
  if not flags_obj.datasets_num_private_threads:
    flags_obj.datasets_num_private_threads = (cpu_count - total_gpu_thread_count
                                              - num_mem_copy_threads)
    tf.compat.v1.logging.info('Set datasets_num_private_threads to %s',
                              flags_obj.datasets_num_private_threads)
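
# Example usage for set_gpu_thread_mode_and_count (a sketch; FLAGS must be
# parsed first, and this should run before TensorFlow initializes its GPU
# devices, since it works by setting environment variables):
#
#   if FLAGS.tf_gpu_thread_mode in ('gpu_private', 'gpu_shared'):
#     set_gpu_thread_mode_and_count(FLAGS)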


def get_optimizer():
  """Returns optimizer to use."""
  # The learning_rate is overwritten at the beginning of each step by callback.
  return gradient_descent_v2.SGD(learning_rate=0.1, momentum=0.9)
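
# Example usage for get_optimizer (a sketch; the loss and metrics shown are
# illustrative):
#
#   model.compile(optimizer=get_optimizer(),
#                 loss='sparse_categorical_crossentropy',
#                 metrics=['sparse_categorical_accuracy'])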


def get_callbacks(learning_rate_schedule_fn, num_images):
  """Returns common callbacks."""
  time_callback = TimeHistory(FLAGS.batch_size, FLAGS.log_steps)

  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=FLAGS.model_dir)

  lr_callback = LearningRateBatchScheduler(
      learning_rate_schedule_fn,
      batch_size=FLAGS.batch_size,
      num_images=num_images)

  return time_callback, tensorboard_callback, lr_callback
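
# Example usage for get_callbacks (a sketch; `learning_rate_schedule` is a
# hypothetical function matching LearningRateBatchScheduler's four-argument
# schedule signature):
#
#   time_cb, tensorboard_cb, lr_cb = get_callbacks(
#       learning_rate_schedule, num_images=50000)
#   model.fit(dataset, epochs=2, callbacks=[time_cb, tensorboard_cb, lr_cb])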


def build_stats(history, eval_output, time_callback):
  """Normalizes and returns dictionary of stats.

  Args:
    history: Results of the training step. Supports both categorical_accuracy
      and sparse_categorical_accuracy.
    eval_output: Output of the eval step. Assumes first value is eval_loss and
      second value is accuracy_top_1.
    time_callback: Time tracking callback used during keras.fit.

  Returns:
    Dictionary of normalized results.
  """
  stats = {}
  if eval_output:
    stats['accuracy_top_1'] = eval_output[1].item()
    stats['eval_loss'] = eval_output[0].item()

  if history and history.history:
    train_hist = history.history
    # Gets final loss from training.
    stats['loss'] = train_hist['loss'][-1].item()
    # Gets top_1 training accuracy.
    if 'categorical_accuracy' in train_hist:
      stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
    elif 'sparse_categorical_accuracy' in train_hist:
      stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()

  if time_callback:
    timestamp_log = time_callback.timestamp_log
    stats['step_timestamp_log'] = timestamp_log
    stats['train_finish_time'] = time_callback.train_finish_time
    if len(timestamp_log) > 1:
      stats['avg_exp_per_second'] = (
          time_callback.batch_size * time_callback.log_steps *
          (len(time_callback.timestamp_log)-1) /
          (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))

  return stats
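
# Example usage for build_stats (a sketch; `train_ds` and `eval_ds` are
# assumed tf.data.Dataset objects and `time_callback` is a TimeHistory
# instance, e.g. from get_callbacks):
#
#   history = model.fit(train_ds, epochs=1, callbacks=[time_callback])
#   eval_output = model.evaluate(eval_ds)
#   stats = build_stats(history, eval_output, time_callback)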


def define_keras_flags():
  """Define flags for Keras models."""
  flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
  flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
  flags.DEFINE_boolean(
      name='enable_xla', default=False,
      help='Whether to enable XLA auto jit compilation. This is still an '
      'experimental feature, and is not yet effective with TF 2.0.')
  flags.DEFINE_integer(
      name='train_steps', default=None,
      help='The number of steps to run for training. If it is larger than '
      '# batches per epoch, then use # batches per epoch. When this flag is '
      'set, only one epoch is run for training.')
  flags.DEFINE_integer(
      name='log_steps', default=100,
      help='Every log_steps, we log timing information such as examples per '
      'second, and store the timestamp of the batch end.')


def get_synth_input_fn(height, width, num_channels, num_classes,
                       dtype=tf.float32):
  """Returns an input function that returns a dataset with random data.

  This input_fn returns a data set that iterates over a set of random data and
  bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This is used to find the upper throughput bound when
  tuning the full input pipeline.

  Args:
    height: Integer height that will be used to create a fake image tensor.
    width: Integer width that will be used to create a fake image tensor.
    num_channels: Integer depth that will be used to create a fake image tensor.
    num_classes: Number of classes that should be represented in the fake labels
      tensor.
    dtype: Data type for features/images.

  Returns:
    An input_fn that can be used in place of a real one to return a dataset
    that can be used for iteration.
  """
  # pylint: disable=unused-argument
  def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
    """Returns dataset filled with random data."""
    # Synthetic input should be within [0, 255].
    inputs = tf.random.truncated_normal([height, width, num_channels],
                                        dtype=dtype,
                                        mean=127,
                                        stddev=60,
                                        name='synthetic_inputs')

    labels = tf.random.uniform([1],
                               minval=0,
                               maxval=num_classes,  # Exclusive upper bound.
                               dtype=tf.int32,
                               name='synthetic_labels')
    # Cast to float32 for Keras model.
    labels = tf.cast(labels, dtype=tf.float32)

    data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()

    # `drop_remainder` will make dataset produce outputs with known shapes.
    data = data.batch(batch_size, drop_remainder=True)
    data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return data

  return input_fn
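
# Example usage for get_synth_input_fn (a sketch with ImageNet-like shapes):
#
#   synth_input_fn = get_synth_input_fn(
#       height=224, width=224, num_channels=3, num_classes=1000)
#   dataset = synth_input_fn(is_training=True, data_dir=None, batch_size=32)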


def is_v2_0():
  """Returns true if using tf 2.0."""
  return tf.__version__.startswith('2')


def get_strategy_scope(strategy):
  """Returns the strategy's scope, or a no-op context if strategy is None."""
  if strategy:
    strategy_scope = strategy.scope()
  else:
    strategy_scope = DummyContextManager()

  return strategy_scope
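
# Example usage for get_strategy_scope (a sketch; `build_model` is a
# hypothetical model-building helper):
#
#   strategy = tf.distribute.MirroredStrategy()  # Or None.
#   with get_strategy_scope(strategy):
#     model = build_model()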


class DummyContextManager(object):
  """A no-op context manager used when no distribution strategy is given."""

  def __enter__(self):
    pass

  def __exit__(self, *args):
    pass


def _monkey_patch_org_assert_broadcastable():
  """Monkey-patch `assert_broadcast` op to avoid OOM when enabling XLA."""
  def no_op_assert_broadcastable(weights, values):
    del weights, values
    tf.compat.v1.logging.info(
        'Using monkey-patched version of assert_broadcastable op, which always '
        'returns a no_op. It should be removed after the XLA OOM issue is '
        'fixed.')
    return tf.constant([], dtype=tf.float32)

  from tensorflow.python.ops import weights_broadcast_ops  # pylint: disable=g-import-not-at-top
  if not hasattr(weights_broadcast_ops, 'org_assert_broadcastable'):
    weights_broadcast_ops.org_assert_broadcastable = (
        weights_broadcast_ops.assert_broadcastable)
  weights_broadcast_ops.assert_broadcastable = no_op_assert_broadcastable


def _undo_monkey_patch_org_assert_broadcastable():
  """Restores the original `assert_broadcastable` op."""
  from tensorflow.python.ops import weights_broadcast_ops  # pylint: disable=g-import-not-at-top
  if hasattr(weights_broadcast_ops, 'org_assert_broadcastable'):
    weights_broadcast_ops.assert_broadcastable = (
        weights_broadcast_ops.org_assert_broadcastable)