# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utility and supporting functions for ResNet.

  This module contains ResNet code which does not directly build layers. This
includes dataset management, hyperparameter and optimizer code, and argument
parsing. Code for defining the ResNet layers can be found in resnet_model.py.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import math
import os

# pylint: disable=g-bad-import-order
from absl import flags
import tensorflow as tf

from official.resnet import resnet_model
from official.utils.flags import core as flags_core
from official.utils.export import export
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.resnet import imagenet_preprocessing
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
# pylint: enable=g-bad-import-order


################################################################################
# Functions for input processing.
################################################################################
def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer,
                           parse_record_fn, num_epochs=1, num_gpus=None,
                           examples_per_epoch=None, dtype=tf.float32):
  """Given a Dataset of raw records, return a Dataset of (image, label) pairs.

  Args:
    dataset: A Dataset representing raw records.
    is_training: A boolean denoting whether the input is for training.
    batch_size: The number of samples per batch.
    shuffle_buffer: The buffer size to use when shuffling records. A larger
      value results in better randomness, but smaller values reduce startup
      time and use less memory.
    parse_record_fn: A function that takes a raw record and returns the
      corresponding (image, label) pair.
    num_epochs: The number of epochs to repeat the dataset.
    num_gpus: The number of gpus used for training.
    examples_per_epoch: The number of examples in an epoch.
    dtype: Data type to use for images/features.

  Returns:
    Dataset of (image, label) pairs ready for iteration.
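
  Example:
    A minimal usage sketch; `filenames` and `parse_record` here are
    illustrative stand-ins, not names defined in this module:

      raw = tf.data.TFRecordDataset(filenames)
      ds = process_record_dataset(raw, is_training=True, batch_size=32,
                                  shuffle_buffer=1500,
                                  parse_record_fn=parse_record)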
  """

  # Prefetches a batch at a time to smooth out the time taken to load input
  # files for shuffling and processing.
  dataset = dataset.prefetch(buffer_size=batch_size)
  if is_training:
    # Shuffles records before repeating to respect epoch boundaries.
    dataset = dataset.shuffle(buffer_size=shuffle_buffer)

  # Repeats the dataset for the number of epochs to train.
  dataset = dataset.repeat(num_epochs)

  # Parses the raw records into images and labels.
  dataset = dataset.apply(
      tf.contrib.data.map_and_batch(
          lambda value: parse_record_fn(value, is_training, dtype),
          batch_size=batch_size,
          num_parallel_calls=1,
          drop_remainder=False))

  # Operations between the final prefetch and the get_next call to the iterator
  # will happen synchronously during run time. We prefetch here again to
  # background all of the above processing work and keep it out of the
  # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
  # allows DistributionStrategies to adjust how many batches to fetch based
  # on how many devices are present.
  dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)

  return dataset


def get_synth_input_fn(height, width, num_channels, num_classes,
                       dtype=tf.float32):
  """Returns an input function that returns a dataset with random data.

  This input_fn returns a dataset that iterates over a set of random data and
  bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This is used to find the upper throughput bound when
  tuning the full input pipeline.

  Args:
    height: Integer height that will be used to create a fake image tensor.
    width: Integer width that will be used to create a fake image tensor.
    num_channels: Integer depth that will be used to create a fake image tensor.
    num_classes: Number of classes that should be represented in the fake labels
      tensor.
    dtype: Data type for features/images.

  Returns:
    An input_fn that can be used in place of a real one to return a dataset
    that can be used for iteration.
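
  Example:
    A hypothetical benchmarking call with ImageNet-like shapes:

      input_fn = get_synth_input_fn(224, 224, 3, 1000)
      dataset = input_fn(is_training=True, data_dir=None, batch_size=32)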
  """
  # pylint: disable=unused-argument
  def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
    """Returns dataset filled with random data."""
    # Synthetic input should be within [0, 255].
    inputs = tf.truncated_normal(
        [batch_size] + [height, width, num_channels],
        dtype=dtype,
        mean=127,
        stddev=60,
        name='synthetic_inputs')

    labels = tf.random_uniform(
        [batch_size],
        minval=0,
        maxval=num_classes - 1,
        dtype=tf.int32,
        name='synthetic_labels')
    data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
    data = data.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
    return data

  return input_fn


def image_bytes_serving_input_fn(image_shape, dtype=tf.float32):
  """Serving input fn for raw jpeg images."""

  def _preprocess_image(image_bytes):
    """Preprocess a single raw image."""
    # Bounding box around the whole image.
    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=dtype, shape=[1, 1, 4])
    height, width, num_channels = image_shape
    image = imagenet_preprocessing.preprocess_image(
        image_bytes, bbox, height, width, num_channels, is_training=False)
    return image

  image_bytes_list = tf.placeholder(
      shape=[None], dtype=tf.string, name='input_tensor')
  images = tf.map_fn(
      _preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
  return tf.estimator.export.TensorServingInputReceiver(
      images, {'image_bytes': image_bytes_list})
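
# A sketch of wiring this serving input fn into export; the shape and export
# directory here are illustrative (resnet_main below is the real call site):
#   receiver_fn = functools.partial(
#       image_bytes_serving_input_fn, [224, 224, 3], dtype=tf.float32)
#   classifier.export_savedmodel('/tmp/export', receiver_fn)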


################################################################################
# Functions for running training/eval/validation loops for the model.
################################################################################
def learning_rate_with_decay(
    batch_size, batch_denom, num_images, boundary_epochs, decay_rates,
    base_lr=0.1, warmup=False):
  """Get a learning rate that decays step-wise as training progresses.

  Args:
    batch_size: the number of examples processed in each training batch.
    batch_denom: this value will be used to scale the base learning rate.
      `0.1 * batch size` is divided by this number, such that when
      batch_denom == batch_size, the initial learning rate will be 0.1.
    num_images: total number of images that will be used for training.
    boundary_epochs: list of ints representing the epochs at which we
      decay the learning rate.
    decay_rates: list of floats representing the decay rates to be used
      for scaling the learning rate. It should have one more element
      than `boundary_epochs`, and all elements should have the same type.
    base_lr: Initial learning rate scaled based on batch_denom.
    warmup: Run a 5 epoch warmup to the initial lr.

  Returns:
    Returns a function that takes a single argument (the number of batches
    trained so far, i.e. global_step) and returns the learning rate to be used
    for training the next batch.
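
  Example:
    An illustrative CIFAR-10-style configuration (the values are hypothetical,
    matching the epoch boundaries in the comment below):

      lr_fn = learning_rate_with_decay(
          batch_size=128, batch_denom=128, num_images=50000,
          boundary_epochs=[100, 150, 200],
          decay_rates=[1, 0.1, 0.01, 0.001])
      # lr_fn(global_step) returns the learning rate tensor for that step.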
  """
  initial_learning_rate = base_lr * batch_size / batch_denom
  batches_per_epoch = num_images / batch_size

  # Reduce the learning rate at certain epochs.
  # CIFAR-10: divide by 10 at epoch 100, 150, and 200
  # ImageNet: divide by 10 at epoch 30, 60, 80, and 90
  boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
  vals = [initial_learning_rate * decay for decay in decay_rates]

  def learning_rate_fn(global_step):
    """Builds scaled learning rate function with 5 epoch warm up."""
    lr = tf.train.piecewise_constant(global_step, boundaries, vals)
    if warmup:
      warmup_steps = int(batches_per_epoch * 5)
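      # Linear warmup: the rate ramps from 0 up to initial_learning_rate over
      # the first 5 epochs (e.g. halfway through warmup it is roughly
      # initial_learning_rate / 2) before the piecewise schedule takes over.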
      warmup_lr = (
          initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(
              warmup_steps, tf.float32))
      return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
    return lr

  return learning_rate_fn


def resnet_model_fn(features, labels, mode, model_class,
                    resnet_size, weight_decay, learning_rate_fn, momentum,
                    data_format, resnet_version, loss_scale,
                    loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE,
                    fine_tune=False):
  """Shared functionality for different resnet model_fns.

  Initializes the ResnetModel representing the model layers
  and uses that model to build the necessary EstimatorSpecs for
  the `mode` in question. For training, this means building losses,
  the optimizer, and the train op that get passed into the EstimatorSpec.
  For evaluation and prediction, the EstimatorSpec is returned without
  a train op, but with the necessary parameters for the given mode.

  Args:
    features: tensor representing input images
    labels: tensor representing class labels for all input images
    mode: current estimator mode; should be one of
      `tf.estimator.ModeKeys.TRAIN`, `EVAL`, `PREDICT`
    model_class: a class representing a TensorFlow model that has a __call__
      function. We assume here that this is a subclass of ResnetModel.
    resnet_size: A single integer for the size of the ResNet model.
    weight_decay: weight decay loss rate used to regularize learned variables.
    learning_rate_fn: function that returns the current learning rate given
      the current global_step
    momentum: momentum term used for optimization
    data_format: Input format ('channels_last', 'channels_first', or None).
      If set to None, the format is dependent on whether a GPU is available.
    resnet_version: Integer representing which version of the ResNet network to
      use. See README for details. Valid values: [1, 2]
    loss_scale: The factor to scale the loss for numerical stability. A detailed
      summary is present in the arg parser help text.
    loss_filter_fn: function that takes a string variable name and returns
      True if the var should be included in loss calculation, and False
      otherwise. If None, batch_normalization variables will be excluded
      from the loss.
    dtype: the TensorFlow dtype to use for calculations.
    fine_tune: If True, only train the dense (final) layers.

  Returns:
    EstimatorSpec parameterized according to the input params and the
    current mode.
  """

  # Generate a summary node for the images
  tf.summary.image('images', features, max_outputs=6)
  # Check that features/images have the same data type used for calculations.
  assert features.dtype == dtype

  model = model_class(resnet_size, data_format, resnet_version=resnet_version,
                      dtype=dtype)

  logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)

  # This acts as a no-op if the logits are already in fp32 (provided logits are
  # not a SparseTensor). If dtype is low precision, logits must be cast to
  # fp32 for numerical stability.
  logits = tf.cast(logits, tf.float32)

  predictions = {
      'classes': tf.argmax(logits, axis=1),
      'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
  }

  if mode == tf.estimator.ModeKeys.PREDICT:
    # Return the predictions and the specification for serving a SavedModel
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        export_outputs={
            'predict': tf.estimator.export.PredictOutput(predictions)
        })

  # Calculate loss, which includes softmax cross entropy and L2 regularization.
  cross_entropy = tf.losses.sparse_softmax_cross_entropy(
      logits=logits, labels=labels)

  # Create a tensor named cross_entropy for logging purposes.
  tf.identity(cross_entropy, name='cross_entropy')
  tf.summary.scalar('cross_entropy', cross_entropy)

  # If no loss_filter_fn is passed, assume we want the default behavior,
  # which is that batch_normalization variables are excluded from loss.
  def exclude_batch_norm(name):
    return 'batch_normalization' not in name
  loss_filter_fn = loss_filter_fn or exclude_batch_norm

  # Add weight decay to the loss.
  l2_loss = weight_decay * tf.add_n(
      # loss is computed using fp32 for numerical stability.
      [tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()
       if loss_filter_fn(v.name)])
  tf.summary.scalar('l2_loss', l2_loss)
  loss = cross_entropy + l2_loss

  if mode == tf.estimator.ModeKeys.TRAIN:
    global_step = tf.train.get_or_create_global_step()

    learning_rate = learning_rate_fn(global_step)

    # Create a tensor named learning_rate for logging purposes
    tf.identity(learning_rate, name='learning_rate')
    tf.summary.scalar('learning_rate', learning_rate)

    optimizer = tf.train.MomentumOptimizer(
        learning_rate=learning_rate,
        momentum=momentum
    )

    def _dense_grad_filter(gvs):
      """Only apply gradient updates to the final layer.

      This function is used for fine tuning.

      Args:
        gvs: list of tuples with gradients and variable info
      Returns:
        filtered gradients so that only the dense layer remains
      """
      return [(g, v) for g, v in gvs if 'dense' in v.name]

    if loss_scale != 1:
      # When computing fp16 gradients, often intermediate tensor values are
      # so small, they underflow to 0. To avoid this, we multiply the loss by
      # loss_scale to make these tensor values loss_scale times bigger.
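      # E.g. with loss_scale=128, gradients are computed at 128x their true
      # value, keeping small values out of fp16's underflow range; they are
      # divided back down below before being applied.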
      scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)

      if fine_tune:
        scaled_grad_vars = _dense_grad_filter(scaled_grad_vars)

      # Once the gradient computation is complete we can scale the gradients
      # back to the correct scale before passing them to the optimizer.
      unscaled_grad_vars = [(grad / loss_scale, var)
                            for grad, var in scaled_grad_vars]
      minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
    else:
      grad_vars = optimizer.compute_gradients(loss)
      if fine_tune:
        grad_vars = _dense_grad_filter(grad_vars)
      minimize_op = optimizer.apply_gradients(grad_vars, global_step)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    train_op = tf.group(minimize_op, update_ops)
  else:
    train_op = None

  accuracy = tf.metrics.accuracy(labels, predictions['classes'])
  accuracy_top_5 = tf.metrics.mean(tf.nn.in_top_k(predictions=logits,
                                                  targets=labels,
                                                  k=5,
                                                  name='top_5_op'))
  metrics = {'accuracy': accuracy,
             'accuracy_top_5': accuracy_top_5}

  # Create a tensor named train_accuracy for logging purposes
  tf.identity(accuracy[1], name='train_accuracy')
  tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
  tf.summary.scalar('train_accuracy', accuracy[1])
  tf.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])

  return tf.estimator.EstimatorSpec(
      mode=mode,
      predictions=predictions,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=metrics)


def resnet_main(
    flags_obj, model_function, input_function, dataset_name, shape=None):
  """Shared main loop for ResNet Models.

  Args:
    flags_obj: An object containing parsed flags. See define_resnet_flags()
      for details.
    model_function: the function that instantiates the Model and builds the
      ops for train/eval. This will be passed directly into the estimator.
    input_function: the function that processes the dataset and returns a
      dataset that the estimator can train on. This will be wrapped with
      all the relevant flags for running and passed to estimator.
    dataset_name: the name of the dataset for training and evaluation. This is
      used for logging purposes.
    shape: list of ints representing the shape of the images used for training.
      This is only used if flags_obj.export_dir is passed.
  """

  model_helpers.apply_clean(flags.FLAGS)

  # Using the Winograd non-fused algorithms provides a small performance boost.
  os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

  # Create session config based on values of inter_op_parallelism_threads and
  # intra_op_parallelism_threads. Note that we default to having
  # allow_soft_placement = True, which is required for multi-GPU and not
  # harmful for other modes.
  session_config = tf.ConfigProto(
      inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
      intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
      allow_soft_placement=True)

  distribution_strategy = distribution_utils.get_distribution_strategy(
      flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)

  run_config = tf.estimator.RunConfig(
      train_distribute=distribution_strategy, session_config=session_config)

  # Initialize the model with all but the dense layer from a pretrained ResNet.
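  # The regex '^(?!.*dense)' uses a negative lookahead: it warm-starts every
  # variable whose name does not contain 'dense', leaving the final layer
  # randomly initialized for fine tuning.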
  if flags_obj.pretrained_model_checkpoint_path is not None:
    warm_start_settings = tf.estimator.WarmStartSettings(
        flags_obj.pretrained_model_checkpoint_path,
        vars_to_warm_start='^(?!.*dense)')
  else:
    warm_start_settings = None

  classifier = tf.estimator.Estimator(
      model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config,
      warm_start_from=warm_start_settings, params={
          'resnet_size': int(flags_obj.resnet_size),
          'data_format': flags_obj.data_format,
          'batch_size': flags_obj.batch_size,
          'resnet_version': int(flags_obj.resnet_version),
          'loss_scale': flags_core.get_loss_scale(flags_obj),
          'dtype': flags_core.get_tf_dtype(flags_obj),
          'fine_tune': flags_obj.fine_tune
      })

  run_params = {
      'batch_size': flags_obj.batch_size,
      'dtype': flags_core.get_tf_dtype(flags_obj),
      'resnet_size': flags_obj.resnet_size,
      'resnet_version': flags_obj.resnet_version,
      'synthetic_data': flags_obj.use_synthetic_data,
      'train_epochs': flags_obj.train_epochs,
  }
  if flags_obj.use_synthetic_data:
    dataset_name = dataset_name + '-synthetic'

  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info('resnet', dataset_name, run_params,
                                test_id=flags_obj.benchmark_test_id)

  train_hooks = hooks_helper.get_train_hooks(
      flags_obj.hooks,
      model_dir=flags_obj.model_dir,
      batch_size=flags_obj.batch_size)

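  # per_device_batch_size divides the global batch size evenly across the
  # available GPUs (illustratively, a global batch of 256 on 8 GPUs gives 32
  # per device); it errors out if the batch size is not evenly divisible.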
  def input_fn_train(num_epochs):
    return input_function(
        is_training=True, data_dir=flags_obj.data_dir,
        batch_size=distribution_utils.per_device_batch_size(
            flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
        num_epochs=num_epochs,
        num_gpus=flags_core.get_num_gpus(flags_obj),
        dtype=flags_core.get_tf_dtype(flags_obj))

  def input_fn_eval():
    return input_function(
        is_training=False, data_dir=flags_obj.data_dir,
        batch_size=distribution_utils.per_device_batch_size(
            flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
        num_epochs=1,
        dtype=flags_core.get_tf_dtype(flags_obj))

  if flags_obj.eval_only or not flags_obj.train_epochs:
    # If --eval_only is set, perform a single loop with zero train epochs.
    schedule, n_loops = [0], 1
  else:
    # Compute the number of times to loop while training. All but the last
    # pass will train for `epochs_between_evals` epochs, while the last will
    # train for the number needed to reach `training_epochs`. For instance if
    #   train_epochs = 25 and epochs_between_evals = 10
    # schedule will be set to [10, 10, 5]. That is to say, the loop will:
    #   Train for 10 epochs and then evaluate.
    #   Train for another 10 epochs and then evaluate.
    #   Train for a final 5 epochs (to reach 25 epochs) and then evaluate.
    n_loops = math.ceil(flags_obj.train_epochs / flags_obj.epochs_between_evals)
    schedule = [flags_obj.epochs_between_evals for _ in range(int(n_loops))]
    schedule[-1] = flags_obj.train_epochs - sum(schedule[:-1])  # over counting.

  for cycle_index, num_train_epochs in enumerate(schedule):
    tf.logging.info('Starting cycle: %d/%d', cycle_index, int(n_loops))

    if num_train_epochs:
      classifier.train(input_fn=lambda: input_fn_train(num_train_epochs),
                       hooks=train_hooks, max_steps=flags_obj.max_train_steps)

    tf.logging.info('Starting to evaluate.')

    # flags_obj.max_train_steps is generally associated with testing and
    # profiling. As a result it is frequently called with synthetic data, which
    # will iterate forever. Passing steps=flags_obj.max_train_steps allows the
    # eval (which is generally unimportant in those circumstances) to terminate.
    # Note that eval will run for max_train_steps each loop, regardless of the
    # global_step count.
    eval_results = classifier.evaluate(input_fn=input_fn_eval,
                                       steps=flags_obj.max_train_steps)

    benchmark_logger.log_evaluation_result(eval_results)

    if model_helpers.past_stop_threshold(
        flags_obj.stop_threshold, eval_results['accuracy']):
      break

  if flags_obj.export_dir is not None:
    # Exports a saved model for the given classifier.
    export_dtype = flags_core.get_tf_dtype(flags_obj)
    if flags_obj.image_bytes_as_serving_input:
      input_receiver_fn = functools.partial(
          image_bytes_serving_input_fn, shape, dtype=export_dtype)
    else:
      input_receiver_fn = export.build_tensor_serving_input_receiver_fn(
          shape, batch_size=flags_obj.batch_size, dtype=export_dtype)
    classifier.export_savedmodel(flags_obj.export_dir, input_receiver_fn,
                                 strip_default_attrs=True)


def define_resnet_flags(resnet_size_choices=None):
  """Add flags and validators for ResNet."""
  flags_core.define_base()
  flags_core.define_performance(num_parallel_calls=False)
  flags_core.define_image()
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name='resnet_version', short_name='rv', default='1',
      enum_values=['1', '2'],
      help=flags_core.help_wrap(
          'Version of ResNet. (1 or 2) See README.md for details.'))
  flags.DEFINE_bool(
      name='fine_tune', short_name='ft', default=False,
      help=flags_core.help_wrap(
          'If True, do not train any parameters except for the final layer.'))
  flags.DEFINE_string(
      name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
      help=flags_core.help_wrap(
          'If not None, initialize all of the network except the final layer '
          'with these values.'))
  flags.DEFINE_boolean(
      name='eval_only', default=False,
      help=flags_core.help_wrap('Skip training and only perform evaluation on '
                                'the latest checkpoint.'))
  flags.DEFINE_boolean(
      name='image_bytes_as_serving_input', default=False,
      help=flags_core.help_wrap(
          'If True exports savedmodel with serving signature that accepts '
          'JPEG image bytes instead of a fixed size [HxWxC] tensor that '
          'represents the image. The former is easier to use for serving at '
          'the expense of image resize/cropping being done as part of model '
          'inference. Note, this flag only applies to ImageNet and cannot '
          'be used for CIFAR.'))

  choice_kwargs = dict(
      name='resnet_size', short_name='rs', default='50',
      help=flags_core.help_wrap('The size of the ResNet model to use.'))

  if resnet_size_choices is None:
    flags.DEFINE_string(**choice_kwargs)
  else:
    flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)