"finetune_demo/sft.sh" did not exist on "7769bf8b0e6d68c05ec16b57f8c2b9612cc49446"
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs a ResNet model on the Cifar-10 dataset."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import app as absl_app
from absl import flags
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.resnet import cifar10_main as cifar_main
from official.resnet.keras import keras_common
from official.resnet.keras import resnet56
from official.utils.flags import core as flags_core
from official.utils.logs import logger
from official.utils.misc import distribution_utils


LR_SCHEDULE = [  # (multiplier, epoch to start) tuples
    (0.1, 91), (0.01, 136), (0.001, 182)
]


def learning_rate_schedule(current_epoch, current_batch, batches_per_epoch,
                           batch_size):
  """Handles the linear scaling rule and stepwise learning-rate decay.

  The learning rate starts at the base learning rate scaled linearly by the
  batch size (batch_size / 128); after 91, 136 and 182 epochs it is divided
  by a further factor of 10 at each boundary.

  Args:
    current_epoch: integer, current epoch indexed from 0.
    current_batch: integer, current batch in the current epoch, indexed from 0.
    batches_per_epoch: integer, number of batches in one epoch.
    batch_size: integer, global batch size used by the linear scaling rule.

  Returns:
    Adjusted learning rate.
  """
  initial_learning_rate = keras_common.BASE_LEARNING_RATE * batch_size / 128
  learning_rate = initial_learning_rate
  for mult, start_epoch in LR_SCHEDULE:
    if current_epoch >= start_epoch:
      learning_rate = initial_learning_rate * mult
    else:
      break
  return learning_rate
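
# A quick worked example of the schedule above, assuming
# keras_common.BASE_LEARNING_RATE is 0.1 (that constant is defined in
# keras_common, not in this file). With batch_size=128 the linear-scaling
# factor is 1 and batches_per_epoch is 50000 // 128 = 390, so:
#   learning_rate_schedule(0,   0, 390, 128) -> 0.1     (base rate)
#   learning_rate_schedule(91,  0, 390, 128) -> 0.01
#   learning_rate_schedule(136, 0, 390, 128) -> 0.001
#   learning_rate_schedule(182, 0, 390, 128) -> 0.0001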


def parse_record_keras(raw_record, is_training, dtype):
  """Parses a record containing a training example of an image.

  The input record is parsed into a label and image, and the image is passed
  through preprocessing steps (cropping, flipping, and so on).

  This method also converts the label to a one-hot vector to fit the
  categorical cross-entropy loss used in run().

  Args:
    raw_record: scalar Tensor tf.string containing a serialized
      Example protocol buffer.
    is_training: A boolean denoting whether the input is for training.
    dtype: Data type to use for input images.

  Returns:
    Tuple with processed image tensor and one-hot-encoded label tensor.
  """
  image, label = cifar_main.parse_record(raw_record, is_training, dtype)
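  # Expand the scalar class index into a dense one-hot vector of length
  # NUM_CLASSES: tf.sparse_to_dense writes a 1 at the label's index and the
  # default 0 everywhere else.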
  label = tf.sparse_to_dense(label, (cifar_main.NUM_CLASSES,), 1)
  return image, label


def run(flags_obj):
  """Run ResNet Cifar-10 training and eval loop using native Keras APIs.

  Args:
    flags_obj: An object containing parsed flag values.

  Raises:
    ValueError: If fp16 is passed as it is not currently supported.
  """
  if flags_obj.enable_eager:
    tf.enable_eager_execution()

  # flags_core.get_tf_dtype returns a tf.DType, so compare against tf.float16
  # rather than the flag string (comparing to 'fp16' would never match).
  dtype = flags_core.get_tf_dtype(flags_obj)
  if dtype == tf.float16:
    raise ValueError('dtype fp16 is not supported in Keras. Use the default '
                     'value (fp32).')

  per_device_batch_size = distribution_utils.per_device_batch_size(
      flags_obj.batch_size, flags_core.get_num_gpus(flags_obj))
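  # per_device_batch_size above splits the global --batch_size evenly across
  # the visible GPUs; e.g. a global batch of 256 on 2 GPUs leaves each replica
  # with 128 examples per step (uneven splits are rejected).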

  if flags_obj.use_synthetic_data:
    input_fn = keras_common.get_synth_input_fn(
        height=cifar_main.HEIGHT,
        width=cifar_main.WIDTH,
        num_channels=cifar_main.NUM_CHANNELS,
        num_classes=cifar_main.NUM_CLASSES,
        dtype=flags_core.get_tf_dtype(flags_obj))
  else:
    input_fn = cifar_main.input_fn

  train_input_dataset = input_fn(
      is_training=True,
      data_dir=flags_obj.data_dir,
      batch_size=per_device_batch_size,
      num_epochs=flags_obj.train_epochs,
      parse_record_fn=parse_record_keras)

  eval_input_dataset = input_fn(
      is_training=False,
      data_dir=flags_obj.data_dir,
      batch_size=per_device_batch_size,
      num_epochs=flags_obj.train_epochs,
      parse_record_fn=parse_record_keras)

  optimizer = keras_common.get_optimizer()
  strategy = distribution_utils.get_distribution_strategy(
      flags_obj.num_gpus, flags_obj.use_one_device_strategy)

  model = resnet56.ResNet56(input_shape=(32, 32, 3),
                            classes=cifar_main.NUM_CLASSES)

  model.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['categorical_accuracy'],
                distribute=strategy)
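
  # Note: handing the DistributionStrategy to compile() via the `distribute`
  # argument is the 1.x-era tf.keras API this file targets; later TF releases
  # drop that argument in favor of building the model under strategy.scope().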

  time_callback, tensorboard_callback, lr_callback = keras_common.get_callbacks(
      learning_rate_schedule, cifar_main.NUM_IMAGES['train'])

  train_steps = cifar_main.NUM_IMAGES['train'] // flags_obj.batch_size
  train_epochs = flags_obj.train_epochs

  if flags_obj.train_steps:
    train_steps = min(flags_obj.train_steps, train_steps)
    train_epochs = 1
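  # With CIFAR-10's 50,000 training images and a batch size of 128, train_steps
  # above is 390 per epoch; --train_steps caps it and forces a single epoch,
  # which is convenient for smoke tests.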

  num_eval_steps = (cifar_main.NUM_IMAGES['validation'] //
                    flags_obj.batch_size)

  history = model.fit(train_input_dataset,
                      epochs=train_epochs,
                      steps_per_epoch=train_steps,
                      callbacks=[
                          time_callback,
                          lr_callback,
                          tensorboard_callback
                      ],
                      validation_steps=num_eval_steps,
                      validation_data=eval_input_dataset,
                      verbose=1)

  # Initialize so the stats call below still works when --skip_eval is set.
  eval_output = None
  if not flags_obj.skip_eval:
    eval_output = model.evaluate(eval_input_dataset,
                                 steps=num_eval_steps,
                                 verbose=1)

  stats = keras_common.analyze_fit_and_eval_result(history, eval_output)

  return stats


def main(_):
  with logger.benchmark_context(flags.FLAGS):
    run(flags.FLAGS)
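

# A typical invocation (a sketch; --data_dir, --batch_size and --train_epochs
# come from the shared official-model flag definitions wired up below):
#   python keras_cifar_main.py --data_dir=/tmp/cifar10_data \
#       --batch_size=128 --train_epochs=182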


if __name__ == '__main__':
  tf.logging.set_verbosity(tf.logging.INFO)
  cifar_main.define_cifar_flags()
  keras_common.define_keras_flags()
  absl_app.run(main)