# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions and classes used by both keras cifar and imagenet."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import multiprocessing
import os

import numpy as np

# pylint: disable=g-bad-import-order
from absl import flags
import tensorflow as tf

from official.utils.misc import keras_utils
# pylint: disable=ungrouped-imports
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.eager import profiler
from tensorflow.python.keras.optimizer_v2 import (gradient_descent as
                                                  gradient_descent_v2)

FLAGS = flags.FLAGS
BASE_LEARNING_RATE = 0.1  # This matches Jing's version.
TRAIN_TOP_1 = 'training_accuracy_top_1'


class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
  """Callback to update learning rate on every batch (not epoch boundaries).

  N.B. Only support Keras optimizers, not TF optimizers.

  Args:
      schedule: a function that takes an epoch index and a batch index as input
          (both integer, indexed from 0) and returns a new learning rate as
          output (float).
  """

  def __init__(self, schedule, batch_size, num_images):
    super(LearningRateBatchScheduler, self).__init__()
    self.schedule = schedule
    self.batches_per_epoch = num_images / batch_size
    self.batch_size = batch_size
    self.epochs = -1
    self.prev_lr = -1

  def on_epoch_begin(self, epoch, logs=None):
    if not hasattr(self.model.optimizer, 'learning_rate'):
      raise ValueError('Optimizer must have a "learning_rate" attribute.')
    self.epochs += 1

  def on_batch_begin(self, batch, logs=None):
    """Executes before a step begins."""
    lr = self.schedule(self.epochs,
                       batch,
                       self.batches_per_epoch,
                       self.batch_size)
    if not isinstance(lr, (float, np.float32, np.float64)):
      raise ValueError(
          'The output of the "schedule" function must be a float.')
    if lr != self.prev_lr:
      self.model.optimizer.learning_rate = lr  # lr should be a float here
      self.prev_lr = lr
      tf.compat.v1.logging.debug(
          'Epoch %05d Batch %05d: LearningRateBatchScheduler '
          'changes learning rate to %s.', self.epochs, batch, lr)
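

# Example (an illustrative sketch, not part of the original module): a
# `schedule` function passed to LearningRateBatchScheduler receives the
# current epoch index, the batch index within the epoch, the number of
# batches per epoch, and the batch size, and must return the new learning
# rate as a float. A hypothetical linear-warmup schedule could look like:
#
#   def example_schedule(epoch, batch, batches_per_epoch, batch_size):
#     progress = epoch + float(batch) / batches_per_epoch
#     return BASE_LEARNING_RATE * min(progress / 5.0, 1.0)
#
#   lr_callback = LearningRateBatchScheduler(
#       example_schedule, batch_size=128, num_images=50000)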


class ProfilerCallback(tf.keras.callbacks.Callback):
  """Save profiles in specified step range to log directory."""

  def __init__(self, log_dir, start_step, stop_step):
    super(ProfilerCallback, self).__init__()
    self.log_dir = log_dir
    self.start_step = start_step
    self.stop_step = stop_step

  def on_batch_begin(self, batch, logs=None):
    if batch == self.start_step:
      profiler.start()
      tf.compat.v1.logging.info('Profiler started at Step %s', self.start_step)

  def on_batch_end(self, batch, logs=None):
    if batch == self.stop_step:
      results = profiler.stop()
      profiler.save(self.log_dir, results)
      tf.compat.v1.logging.info(
          'Profiler saved profiles for steps between %s and %s to %s',
          self.start_step, self.stop_step, self.log_dir)


def get_config_proto_v1():
  """Return config proto according to flag settings, or None to use default."""
  config = None
  if FLAGS.enable_xla:
    # TODO(haoyuzhang): Remove this monkey patch when XLA OOM issue is fixed.
    _monkey_patch_org_assert_broadcastable()

    config = tf.compat.v1.ConfigProto()
    config.graph_options.optimizer_options.global_jit_level = (
        tf.OptimizerOptions.ON_2)
    # Disable PinToHostOptimizer in grappler when enabling XLA because it causes
    # OOM and performance regression.
    config.graph_options.rewrite_options.pin_to_host_optimization = (
        rewriter_config_pb2.RewriterConfig.OFF)
  return config
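

# A minimal usage sketch (an assumption, not from the original file): the
# ConfigProto built above is meant for TF 1.x graph-mode entry points, e.g.
#
#   sess = tf.compat.v1.Session(config=get_config_proto_v1())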


def set_config_v2():
  """Config eager context according to flag values using TF 2.0 API."""
  if FLAGS.enable_xla:
    # TODO(haoyuzhang): Remove this monkey patch when XLA OOM issue is fixed.
    _monkey_patch_org_assert_broadcastable()

    tf.config.optimizer.set_jit(True)
    # Disable PinToHostOptimizer in grappler when enabling XLA because it
    # causes OOM and performance regression.
    tf.config.optimizer.set_experimental_options(
        {"pin_to_host_optimization": False}
    )


def set_gpu_thread_mode_and_count(flags_obj):
  """Set GPU thread mode and count, and adjust dataset threads count."""
  cpu_count = multiprocessing.cpu_count()
  tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)

  # Allocate a private thread pool per GPU to schedule and launch kernels.
  per_gpu_thread_count = flags_obj.per_gpu_thread_count or 2
  os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
  os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
  tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
                            os.environ['TF_GPU_THREAD_COUNT'])
  tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
                            os.environ['TF_GPU_THREAD_MODE'])

  # Limit data preprocessing threadpool to CPU cores minus number of total GPU
  # private threads and memory copy threads.
  total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
  num_mem_copy_threads = flags_obj.num_gpus
  if not flags_obj.datasets_num_private_threads:
    flags_obj.datasets_num_private_threads = (cpu_count - total_gpu_thread_count
                                              - num_mem_copy_threads)
    tf.compat.v1.logging.info('Set datasets_num_private_threads to %s',
                              flags_obj.datasets_num_private_threads)
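

# Worked example (illustrative numbers, not from the original file): on a host
# with 48 logical cores, 8 GPUs, and the default of 2 private threads per GPU,
# set_gpu_thread_mode_and_count reserves 8 * 2 = 16 GPU scheduling threads and
# 8 memory-copy threads, leaving 48 - 16 - 8 = 24 threads for
# datasets_num_private_threads.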


def get_optimizer():
  """Returns optimizer to use."""
  # The learning_rate is overwritten at the beginning of each step by the
  # LearningRateBatchScheduler callback.
  return gradient_descent_v2.SGD(learning_rate=0.1, momentum=0.9)


def get_callbacks(learning_rate_schedule_fn, num_images):
  """Returns common callbacks."""
  time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
  lr_callback = LearningRateBatchScheduler(
      learning_rate_schedule_fn,
      batch_size=FLAGS.batch_size,
      num_images=num_images)
  callbacks = [time_callback, lr_callback]

  if FLAGS.enable_tensorboard:
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=FLAGS.model_dir)
    callbacks.append(tensorboard_callback)

  if FLAGS.profile_steps:
    profiler_callback = get_profiler_callback()
    callbacks.append(profiler_callback)

  return callbacks
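

# Typical wiring (a hedged sketch; `model`, `train_dataset`, and `schedule_fn`
# are hypothetical names, not part of this module):
#
#   callbacks = get_callbacks(schedule_fn, num_images=50000)
#   model.fit(train_dataset, epochs=90, callbacks=callbacks)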


def get_profiler_callback():
  """Validate profile_steps flag value and return profiler callback."""
  profile_steps_error_message = (
      'profile_steps must be a comma separated pair of positive integers, '
      'specifying the first and last steps to be profiled.'
  )
  try:
    profile_steps = [int(i) for i in FLAGS.profile_steps.split(',')]
  except ValueError:
    raise ValueError(profile_steps_error_message)
  if len(profile_steps) != 2:
    raise ValueError(profile_steps_error_message)
  start_step, stop_step = profile_steps
  if start_step < 0 or start_step > stop_step:
    raise ValueError(profile_steps_error_message)
  if FLAGS.enable_tensorboard:
    tf.compat.v1.logging.warn(
        'Both TensorBoard and profiler callbacks are used. Note that the '
        'TensorBoard callback profiles the 2nd step (unless otherwise '
        'specified). Please make sure the steps profiled by the two callbacks '
        'do not overlap.')

  return ProfilerCallback(FLAGS.model_dir, start_step, stop_step)
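

# For example, passing --profile_steps=2,4 makes the validation above succeed
# and returns ProfilerCallback(FLAGS.model_dir, 2, 4), which starts the
# profiler when batch 2 begins and saves the collected profile after batch 4.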


def build_stats(history, eval_output, callbacks):
  """Normalizes and returns dictionary of stats.

  Args:
    history: Results of the training step. Supports both categorical_accuracy
      and sparse_categorical_accuracy.
    eval_output: Output of the eval step. Assumes first value is eval_loss and
      second value is accuracy_top_1.
    callbacks: a list of callbacks which might include a time history callback
      used during keras.fit.

  Returns:
    Dictionary of normalized results.
  """
  stats = {}
  if eval_output:
    stats['accuracy_top_1'] = eval_output[1].item()
    stats['eval_loss'] = eval_output[0].item()

  if history and history.history:
    train_hist = history.history
    # Gets final loss from training.
    stats['loss'] = train_hist['loss'][-1].item()
    # Gets top_1 training accuracy.
    if 'categorical_accuracy' in train_hist:
      stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
    elif 'sparse_categorical_accuracy' in train_hist:
      stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()

  if not callbacks:
    return stats

  # Look for the time history callback which was used during keras.fit
  for callback in callbacks:
    if isinstance(callback, keras_utils.TimeHistory):
      timestamp_log = callback.timestamp_log
      stats['step_timestamp_log'] = timestamp_log
      stats['train_finish_time'] = callback.train_finish_time
      if len(timestamp_log) > 1:
        stats['avg_exp_per_second'] = (
            callback.batch_size * callback.log_steps *
            (len(callback.timestamp_log)-1) /
            (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
  return stats
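

# Illustrative result (values invented for the example): with evaluation,
# training history, and a TimeHistory callback all present, build_stats
# returns a dict shaped like
#
#   {'accuracy_top_1': 0.76, 'eval_loss': 0.98, 'loss': 0.85,
#    'training_accuracy_top_1': 0.78, 'step_timestamp_log': [...],
#    'train_finish_time': 1546300800.0, 'avg_exp_per_second': 2400.0}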


def define_keras_flags():
  """Define flags for Keras models."""

  flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
  flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
  flags.DEFINE_boolean(name='use_trivial_model', default=False,
                       help='Whether to use a trivial Keras model.')
  flags.DEFINE_boolean(
      name='enable_xla', default=False,
      help='Whether to enable XLA auto jit compilation. This is still an '
      'experimental feature, and is not yet effective with TF 2.0.')
  flags.DEFINE_boolean(
      name='enable_tensorboard', default=False,
      help='Whether to enable the TensorBoard callback.')
  flags.DEFINE_integer(
      name='train_steps', default=None,
      help='The number of steps to run for training. If it is larger than the '
      'number of batches per epoch, the number of batches per epoch is used '
      'instead. When this flag is set, only one epoch runs for training.')
  flags.DEFINE_string(
      name='profile_steps', default=None,
      help='Save profiling data to model dir at given range of steps. The '
      'value must be a comma separated pair of positive integers, specifying '
      'the first and last step to profile. For example, "--profile_steps=2,4" '
      'triggers the profiler to process 3 steps, starting from the 2nd step. '
      'Note that profiler has a non-trivial performance overhead, and the '
      'output file can be gigantic if profiling many steps.')


def get_synth_input_fn(height, width, num_channels, num_classes,
                       dtype=tf.float32, drop_remainder=True):
  """Returns an input function that returns a dataset with random data.

  This input_fn returns a dataset that iterates over a set of random data and
  bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This is useful for finding the upper throughput
  bound when tuning the full input pipeline.

  Args:
    height: Integer height that will be used to create a fake image tensor.
    width: Integer width that will be used to create a fake image tensor.
    num_channels: Integer depth that will be used to create a fake image tensor.
    num_classes: Number of classes that should be represented in the fake
      labels tensor.
    dtype: Data type for features/images.
    drop_remainder: A boolean that indicates whether to drop the remainder of
      the batches. If True, the batch dimension will be static.

  Returns:
    An input_fn that can be used in place of a real one to return a dataset
    that can be used for iteration.
  """
  # pylint: disable=unused-argument
  def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
    """Returns dataset filled with random data."""
    # Synthetic input should be within [0, 255].
    inputs = tf.random.truncated_normal([height, width, num_channels],
                                        dtype=dtype,
                                        mean=127,
                                        stddev=60,
                                        name='synthetic_inputs')

    # Note: `maxval` is exclusive for integer dtypes, so use `num_classes` to
    # cover labels in [0, num_classes - 1].
    labels = tf.random.uniform([1],
                               minval=0,
                               maxval=num_classes,
                               dtype=tf.int32,
                               name='synthetic_labels')
    # Cast to float32 for Keras model.
    labels = tf.cast(labels, dtype=tf.float32)

    data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()

    # `drop_remainder` will make dataset produce outputs with known shapes.
    data = data.batch(batch_size, drop_remainder=drop_remainder)
    data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return data

  return input_fn
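

# Usage sketch (argument values are illustrative): build an ImageNet-shaped
# synthetic input function, then materialize a batched dataset from it. The
# `is_training` and `data_dir` arguments exist only for signature
# compatibility with real input functions; only `batch_size` is used.
#
#   input_fn = get_synth_input_fn(224, 224, 3, 1000)
#   dataset = input_fn(is_training=True, data_dir=None, batch_size=32)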


def is_v2_0():
  """Returns true if using tf 2.0."""
  return tf.__version__.startswith('2')


def _monkey_patch_org_assert_broadcastable():
  """Monkey-patch `assert_broadcast` op to avoid OOM when enabling XLA."""
  def no_op_assert_broadcastable(weights, values):
    del weights, values
    tf.compat.v1.logging.info(
        'Using monkey-patched version of assert_broadcastable op, which always '
        'returns a no_op. It should be removed once the XLA OOM issue is '
        'fixed.')
    return tf.constant([], dtype=tf.float32)

  from tensorflow.python.ops import weights_broadcast_ops  # pylint: disable=g-import-not-at-top
  if not hasattr(weights_broadcast_ops, 'org_assert_broadcastable'):
    weights_broadcast_ops.org_assert_broadcastable = (
        weights_broadcast_ops.assert_broadcastable)
  weights_broadcast_ops.assert_broadcastable = no_op_assert_broadcastable


def _undo_monkey_patch_org_assert_broadcastable():
  """Restores the original `assert_broadcastable` op after monkey-patching."""
  from tensorflow.python.ops import weights_broadcast_ops  # pylint: disable=g-import-not-at-top
  if hasattr(weights_broadcast_ops, 'org_assert_broadcastable'):
    weights_broadcast_ops.assert_broadcastable = (
        weights_broadcast_ops.org_assert_broadcastable)