# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for the Keras implementations of models."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import tf2
from tensorflow.python.eager import profiler


class BatchTimestamp(object):
  """A structure to store batch time stamp."""

  def __init__(self, batch_index, timestamp):
    self.batch_index = batch_index
    self.timestamp = timestamp

  def __repr__(self):
    return "'BatchTimestamp<batch_index: {}, timestamp: {}>'".format(
        self.batch_index, self.timestamp)


class TimeHistory(tf.keras.callbacks.Callback):
  """Callback for Keras models."""

  def __init__(self, batch_size, log_steps):
    """Callback for logging performance.

    Args:
      batch_size: Total batch size.
      log_steps: Interval of steps between logging of batch level stats.
    """
    self.batch_size = batch_size
    super(TimeHistory, self).__init__()
    self.log_steps = log_steps
    self.global_steps = 0

    # Logs start of step 1 then end of each step based on log_steps interval.
    self.timestamp_log = []

    # Records the time each epoch takes to run from start to finish of epoch.
    self.epoch_runtime_log = []

  def on_train_end(self, logs=None):
    self.train_finish_time = time.time()

  def on_epoch_begin(self, epoch, logs=None):
    self.epoch_start = time.time()

  def on_batch_begin(self, batch, logs=None):
    self.global_steps += 1
    if self.global_steps == 1:
      self.start_time = time.time()
      self.timestamp_log.append(BatchTimestamp(self.global_steps,
                                               self.start_time))

  def on_batch_end(self, batch, logs=None):
    """Records elapse time of the batch and calculates examples per second."""
    if self.global_steps % self.log_steps == 0:
      timestamp = time.time()
      elapsed_time = timestamp - self.start_time
      examples_per_second = (self.batch_size * self.log_steps) / elapsed_time
      self.timestamp_log.append(BatchTimestamp(self.global_steps, timestamp))
      tf.compat.v1.logging.info(
          "BenchmarkMetric: {'global step':%d, 'time_taken': %f,"
          "'examples_per_second': %f}" %
          (self.global_steps, elapsed_time, examples_per_second))
      self.start_time = timestamp

  def on_epoch_end(self, epoch, logs=None):
    epoch_run_time = time.time() - self.epoch_start
    self.epoch_runtime_log.append(epoch_run_time)
    tf.compat.v1.logging.info(
        "BenchmarkMetric: {'epoch':%d, 'time_taken': %f}" %
        (epoch, epoch_run_time))
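
# Example usage of TimeHistory (a sketch only; `model` and `train_dataset` are
# hypothetical stand-ins for the caller's own objects):
#
#   time_callback = TimeHistory(batch_size=256, log_steps=100)
#   model.fit(train_dataset, epochs=2, callbacks=[time_callback])
#   # time_callback.timestamp_log now holds BatchTimestamp entries for step 1
#   # and for every log_steps-th step; time_callback.epoch_runtime_log holds
#   # per-epoch wall-clock times in seconds.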


def get_profiler_callback(model_dir, profile_steps, enable_tensorboard):
  """Validate profile_steps flag value and return profiler callback."""
  profile_steps_error_message = (
      'profile_steps must be a comma separated pair of positive integers, '
      'specifying the first and last steps to be profiled.'
  )
  try:
    profile_steps = [int(i) for i in profile_steps.split(',')]
  except ValueError:
    raise ValueError(profile_steps_error_message)
  if len(profile_steps) != 2:
    raise ValueError(profile_steps_error_message)
  start_step, stop_step = profile_steps
  if start_step < 0 or start_step > stop_step:
    raise ValueError(profile_steps_error_message)
  if enable_tensorboard:
    tf.compat.v1.logging.warn(
        'Both TensorBoard and profiler callbacks are used. Note that the '
        'TensorBoard callback profiles the 2nd step (unless otherwise '
        'specified). Please make sure the steps profiled by the two callbacks '
        'do not overlap.')

  return ProfilerCallback(model_dir, start_step, stop_step)


class ProfilerCallback(tf.keras.callbacks.Callback):
  """Save profiles in specified step range to log directory."""

  def __init__(self, log_dir, start_step, stop_step):
    super(ProfilerCallback, self).__init__()
    self.log_dir = log_dir
    self.start_step = start_step
    self.stop_step = stop_step

  def on_batch_begin(self, batch, logs=None):
    if batch == self.start_step:
      profiler.start()
      tf.compat.v1.logging.info('Profiler started at Step %s', self.start_step)

  def on_batch_end(self, batch, logs=None):
    if batch == self.stop_step:
      results = profiler.stop()
      profiler.save(self.log_dir, results)
      tf.compat.v1.logging.info(
          'Profiler saved profiles for steps between %s and %s to %s',
          self.start_step, self.stop_step, self.log_dir)
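
# Example wiring of the profiler callback (a sketch only; `model`, `dataset`,
# the model_dir path, and the '10,20' step range are illustrative values):
#
#   profiler_callback = get_profiler_callback(
#       model_dir='/tmp/model_dir', profile_steps='10,20',
#       enable_tensorboard=False)
#   model.fit(dataset, epochs=1, callbacks=[profiler_callback])
#   # A profile covering batches 10 through 20 is saved under /tmp/model_dir
#   # and can be viewed with TensorBoard's profile plugin.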


def set_session_config(enable_eager=False,
                       enable_xla=False,
                       enable_grappler_layout_optimizer=True):
  """Sets the session config."""
  if is_v2_0():
    set_config_v2(
        enable_xla=enable_xla,
        enable_grappler_layout_optimizer=enable_grappler_layout_optimizer)
  else:
    config = get_config_proto_v1(
        enable_xla=enable_xla,
        enable_grappler_layout_optimizer=enable_grappler_layout_optimizer)
    if enable_eager:
      tf.compat.v1.enable_eager_execution(config=config)
    else:
      sess = tf.compat.v1.Session(config=config)
      tf.compat.v1.keras.backend.set_session(sess)


def get_config_proto_v1(enable_xla=False,
                        enable_grappler_layout_optimizer=True):
  """Return config proto according to flag settings, or None to use default."""
  config = None
  if enable_xla:
    config = tf.compat.v1.ConfigProto()
    config.graph_options.optimizer_options.global_jit_level = (
        tf.compat.v1.OptimizerOptions.ON_2)
    # Disable PinToHostOptimizer in grappler when enabling XLA because it
    # causes OOM and performance regression.
    config.graph_options.rewrite_options.pin_to_host_optimization = (
        rewriter_config_pb2.RewriterConfig.OFF)
  # TODO(b/76028325): Remove when the generic layout optimizer is ready.
  if not enable_grappler_layout_optimizer:
    if config is None:
      config = tf.compat.v1.ConfigProto()
    # Disable LayoutOptimizer in grappler, because it might de-optimize fp16
    # graphs, and force NCHW data format in all convolutions and batch
    # normalizations.
    config.graph_options.rewrite_options.layout_optimizer = (
        rewriter_config_pb2.RewriterConfig.OFF)
  return config


def set_config_v2(enable_xla=False,
                  enable_grappler_layout_optimizer=False):
  """Config eager context according to flag values using TF 2.0 API."""
  if enable_xla:
    tf.config.optimizer.set_jit(True)
    # Disable PinToHostOptimizer in grappler when enabling XLA because it
    # causes OOM and performance regression.
    tf.config.optimizer.set_experimental_options(
        {'pin_to_host_optimization': False}
    )
  # TODO(b/76028325): Remove when the generic layout optimizer is ready.
  if not enable_grappler_layout_optimizer:
    # Disable LayoutOptimizer in grappler, because it might de-optimize fp16
    # graphs, and force NCHW data format in all convolutions and batch
    # normalizations.
    tf.config.optimizer.set_experimental_options(
        {'layout_optimizer': False}
    )
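
# Example of configuring the backend before model construction (a sketch only;
# the flag values shown are illustrative, not recommended defaults):
#
#   set_session_config(enable_eager=True, enable_xla=False,
#                      enable_grappler_layout_optimizer=False)
#   # Under TF 1.x this enables eager execution (or installs a configured
#   # Session into Keras); under TF 2.0 it only adjusts grappler/XLA options.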


def is_v2_0():
  """Returns true if using tf 2.0."""
  return tf2.enabled()