# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs a ResNet model on the Cifar-10 dataset."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import app as absl_app
from absl import flags
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.resnet import cifar10_main as cifar_main
from official.resnet.keras import keras_common
from official.resnet.keras import resnet_cifar_model
from official.utils.flags import core as flags_core
from official.utils.logs import logger
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils


LR_SCHEDULE = [  # (multiplier, epoch to start) tuples
    (0.1, 91), (0.01, 136), (0.001, 182)
]


def learning_rate_schedule(current_epoch,
                           current_batch,
                           batches_per_epoch,
                           batch_size):
  """Handles linear scaling rule and LR decay.

  Scales the learning rate at the epoch boundaries listed in LR_SCHEDULE by
  the corresponding multiplier.

  Args:
    current_epoch: integer, current epoch indexed from 0.
    current_batch: integer, current batch in the current epoch, indexed from 0.
    batches_per_epoch: integer, number of steps in an epoch.
    batch_size: integer, total batch size.

  Returns:
    Adjusted learning rate.
  """
  del current_batch, batches_per_epoch  # not used
  initial_learning_rate = keras_common.BASE_LEARNING_RATE * batch_size / 128
  learning_rate = initial_learning_rate
  for mult, start_epoch in LR_SCHEDULE:
    if current_epoch >= start_epoch:
      learning_rate = initial_learning_rate * mult
    else:
      break
  return learning_rate
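
# A quick worked example of the schedule above, assuming
# keras_common.BASE_LEARNING_RATE is 0.1 (its value in these Keras helpers at
# the time of writing): with batch_size=256, the linear scaling rule gives an
# initial rate of 0.1 * 256 / 128 = 0.2, so epochs 0-90 train at 0.2, epochs
# 91-135 at 0.2 * 0.1 = 0.02, epochs 136-181 at 0.002, and epoch 182 onward
# (only reached if training past the default 182 epochs) at 0.0002.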


def parse_record_keras(raw_record, is_training, dtype):
  """Parses a record containing a training example of an image.

  The input record is parsed into a label and image, and the image is passed
  through preprocessing steps (cropping, flipping, and so on).

  This method converts the label to one-hot to fit the loss function.

  Args:
    raw_record: scalar Tensor tf.string containing a serialized
      Example protocol buffer.
    is_training: A boolean denoting whether the input is for training.
    dtype: Data type to use for input images.

  Returns:
    Tuple with processed image tensor and one-hot-encoded label tensor.
  """
  image, label = cifar_main.parse_record(raw_record, is_training, dtype)
  label = tf.compat.v1.sparse_to_dense(label, (cifar_main.NUM_CLASSES,), 1)
  return image, label
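
# For illustration: with cifar_main.NUM_CLASSES == 10, a scalar label of 3 is
# expanded by the sparse_to_dense call above into the one-hot vector
# [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], which matches the 'categorical_crossentropy'
# loss used in run() below.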


def run(flags_obj):
  """Run ResNet Cifar-10 training and eval loop using native Keras APIs.

  Args:
    flags_obj: An object containing parsed flag values.

  Raises:
    ValueError: If fp16 is passed as it is not currently supported.

  Returns:
    Dictionary of training and eval stats.
  """
  keras_utils.set_session_config(
      enable_eager=flags_obj.enable_eager,
      enable_xla=flags_obj.enable_xla,
      enable_grappler_layout_optimizer=
      flags_obj.enable_grappler_layout_optimizer)

  # Execute flag override logic for better model performance
  if flags_obj.tf_gpu_thread_mode:
    keras_common.set_gpu_thread_mode_and_count(flags_obj)
  keras_common.set_cudnn_batchnorm_mode()

  dtype = flags_core.get_tf_dtype(flags_obj)
  # get_tf_dtype returns a tf.DType object, so compare against tf.float16
  # rather than the 'fp16' flag string.
  if dtype == tf.float16:
    raise ValueError('dtype fp16 is not supported in Keras. Use the default '
                     'value (fp32).')

  data_format = flags_obj.data_format
  if data_format is None:
    data_format = ('channels_first'
                   if tf.test.is_built_with_cuda() else 'channels_last')
  tf.keras.backend.set_image_data_format(data_format)

  strategy = distribution_utils.get_distribution_strategy(
      distribution_strategy=flags_obj.distribution_strategy,
      num_gpus=flags_obj.num_gpus,
      num_workers=distribution_utils.configure_cluster(),
      all_reduce_alg=flags_obj.all_reduce_alg,
      num_packs=flags_obj.num_packs)
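  # Typical values for the --distribution_strategy flag here are 'off' (run
  # without a strategy) and 'mirrored' (synchronous training on one machine's
  # GPUs); see distribution_utils.get_distribution_strategy for the full set.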

  if strategy:
    # flags_obj.enable_get_next_as_optional controls whether to enable the
    # get_next_as_optional behavior in DistributedIterator. If true, the
    # last partial batch can be supported.
    strategy.extended.experimental_enable_get_next_as_optional = (
        flags_obj.enable_get_next_as_optional
    )

  strategy_scope = distribution_utils.get_strategy_scope(strategy)

  if flags_obj.use_synthetic_data:
    distribution_utils.set_up_synthetic_data()
    input_fn = keras_common.get_synth_input_fn(
        height=cifar_main.HEIGHT,
        width=cifar_main.WIDTH,
        num_channels=cifar_main.NUM_CHANNELS,
        num_classes=cifar_main.NUM_CLASSES,
        dtype=flags_core.get_tf_dtype(flags_obj),
        drop_remainder=True)
  else:
    distribution_utils.undo_set_up_synthetic_data()
    input_fn = cifar_main.input_fn

  train_input_dataset = input_fn(
      is_training=True,
      data_dir=flags_obj.data_dir,
      batch_size=flags_obj.batch_size,
      num_epochs=flags_obj.train_epochs,
      parse_record_fn=parse_record_keras,
      datasets_num_private_threads=flags_obj.datasets_num_private_threads,
      dtype=dtype,
      # Set drop_remainder to avoid the partial-batch logic in the
      # normalization layer, which triggers tf.where and leads to an extra
      # memory copy of input sizes between host and GPU.
      drop_remainder=(not flags_obj.enable_get_next_as_optional))

  eval_input_dataset = None
  if not flags_obj.skip_eval:
    eval_input_dataset = input_fn(
        is_training=False,
        data_dir=flags_obj.data_dir,
        batch_size=flags_obj.batch_size,
        num_epochs=flags_obj.train_epochs,
        parse_record_fn=parse_record_keras)

  with strategy_scope:
    optimizer = keras_common.get_optimizer()
    model = resnet_cifar_model.resnet56(classes=cifar_main.NUM_CLASSES)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=(['categorical_accuracy']
                           if flags_obj.report_accuracy_metrics else None),
                  run_eagerly=flags_obj.run_eagerly,
                  run_distributed=flags_obj.force_v2_in_keras_compile)
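    # Note: the loss is 'categorical_crossentropy' rather than the sparse
    # variant because parse_record_keras one-hot encodes the labels.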

  callbacks = keras_common.get_callbacks(
      learning_rate_schedule, cifar_main.NUM_IMAGES['train'])

  train_steps = cifar_main.NUM_IMAGES['train'] // flags_obj.batch_size
  train_epochs = flags_obj.train_epochs

  if flags_obj.train_steps:
    train_steps = min(flags_obj.train_steps, train_steps)
    train_epochs = 1

  num_eval_steps = (cifar_main.NUM_IMAGES['validation'] //
                    flags_obj.batch_size)

  validation_data = eval_input_dataset
  if flags_obj.skip_eval:
    if flags_obj.set_learning_phase_to_train:
      # TODO(haoyuzhang): Understand slowdown of setting learning phase when
      # not using distribution strategy.
      tf.keras.backend.set_learning_phase(1)
    num_eval_steps = None
    validation_data = None

  if not strategy and flags_obj.explicit_gpu_placement:
    # TODO(b/135607227): Add device scope automatically in Keras training loop
    # when not using distribution strategy.
    no_dist_strat_device = tf.device('/device:GPU:0')
    no_dist_strat_device.__enter__()

  history = model.fit(train_input_dataset,
                      epochs=train_epochs,
                      steps_per_epoch=train_steps,
                      callbacks=callbacks,
                      validation_steps=num_eval_steps,
                      validation_data=validation_data,
                      validation_freq=flags_obj.epochs_between_evals,
                      verbose=2)
  eval_output = None
  if not flags_obj.skip_eval:
    eval_output = model.evaluate(eval_input_dataset,
                                 steps=num_eval_steps,
                                 verbose=2)

  if not strategy and flags_obj.explicit_gpu_placement:
    no_dist_strat_device.__exit__()

  stats = keras_common.build_stats(history, eval_output, callbacks)
  return stats


def define_cifar_flags():
  keras_common.define_keras_flags(dynamic_loss_scale=False)

  flags_core.set_defaults(data_dir='/tmp/cifar10_data/cifar-10-batches-bin',
                          model_dir='/tmp/cifar10_model',
                          train_epochs=182,
                          epochs_between_evals=10,
                          batch_size=128)


def main(_):
  with logger.benchmark_context(flags.FLAGS):
    return run(flags.FLAGS)


if __name__ == '__main__':
  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
  define_cifar_flags()
  absl_app.run(main)
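
# Example invocation (illustrative values; data_dir/model_dir defaults are
# set in define_cifar_flags above, and the remaining flags come from
# keras_common.define_keras_flags and the official flags core):
#
#   python keras_cifar_main.py \
#       --data_dir=/tmp/cifar10_data/cifar-10-batches-bin \
#       --model_dir=/tmp/cifar10_model \
#       --num_gpus=1 \
#       --batch_size=128 \
#       --train_epochs=182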