Unverified Commit ba2b8e00 authored by Yukun Zhu's avatar Yukun Zhu Committed by GitHub
Browse files

Merge pull request #3850 from dheera/master

Add input images, labels, and output semantic predictions to summary
parents 3a6fff33 1c47fab2
...@@ -95,7 +95,6 @@ ORIGINAL_IMAGE = 'original_image' ...@@ -95,7 +95,6 @@ ORIGINAL_IMAGE = 'original_image'
# Test set name. # Test set name.
TEST_SET = 'test' TEST_SET = 'test'
class ModelOptions( class ModelOptions(
collections.namedtuple('ModelOptions', [ collections.namedtuple('ModelOptions', [
'outputs_to_num_classes', 'outputs_to_num_classes',
......
...@@ -67,6 +67,9 @@ flags.DEFINE_integer('save_interval_secs', 1200, ...@@ -67,6 +67,9 @@ flags.DEFINE_integer('save_interval_secs', 1200,
flags.DEFINE_integer('save_summaries_secs', 600, flags.DEFINE_integer('save_summaries_secs', 600,
'How often, in seconds, we compute the summaries.') 'How often, in seconds, we compute the summaries.')
flags.DEFINE_boolean('save_summaries_images', False,
'Save sample inputs, labels, and semantic predictions as images to summary.')
# Settings for training strategy. # Settings for training strategy.
flags.DEFINE_enum('learning_policy', 'poly', ['poly', 'step'], flags.DEFINE_enum('learning_policy', 'poly', ['poly', 'step'],
...@@ -178,6 +181,10 @@ def _build_deeplab(inputs_queue, outputs_to_num_classes, ignore_label): ...@@ -178,6 +181,10 @@ def _build_deeplab(inputs_queue, outputs_to_num_classes, ignore_label):
""" """
samples = inputs_queue.dequeue() samples = inputs_queue.dequeue()
# add name to input and label nodes so we can add to summary
samples[common.IMAGE] = tf.identity(samples[common.IMAGE], name = common.IMAGE)
samples[common.LABEL] = tf.identity(samples[common.LABEL], name = common.LABEL)
model_options = common.ModelOptions( model_options = common.ModelOptions(
outputs_to_num_classes=outputs_to_num_classes, outputs_to_num_classes=outputs_to_num_classes,
crop_size=FLAGS.train_crop_size, crop_size=FLAGS.train_crop_size,
...@@ -191,6 +198,12 @@ def _build_deeplab(inputs_queue, outputs_to_num_classes, ignore_label): ...@@ -191,6 +198,12 @@ def _build_deeplab(inputs_queue, outputs_to_num_classes, ignore_label):
is_training=True, is_training=True,
fine_tune_batch_norm=FLAGS.fine_tune_batch_norm) fine_tune_batch_norm=FLAGS.fine_tune_batch_norm)
# add name to graph node so we can add to summary
outputs_to_scales_to_logits[common.OUTPUT_TYPE][model._MERGED_LOGITS_SCOPE] = tf.identity(
outputs_to_scales_to_logits[common.OUTPUT_TYPE][model._MERGED_LOGITS_SCOPE],
name = common.OUTPUT_TYPE
)
for output, num_classes in six.iteritems(outputs_to_num_classes): for output, num_classes in six.iteritems(outputs_to_num_classes):
train_utils.add_softmax_cross_entropy_loss_for_each_scale( train_utils.add_softmax_cross_entropy_loss_for_each_scale(
outputs_to_scales_to_logits[output], outputs_to_scales_to_logits[output],
...@@ -227,7 +240,7 @@ def main(unused_argv): ...@@ -227,7 +240,7 @@ def main(unused_argv):
tf.gfile.MakeDirs(FLAGS.train_logdir) tf.gfile.MakeDirs(FLAGS.train_logdir)
tf.logging.info('Training on %s set', FLAGS.train_split) tf.logging.info('Training on %s set', FLAGS.train_split)
with tf.Graph().as_default(): with tf.Graph().as_default() as graph:
with tf.device(config.inputs_device()): with tf.device(config.inputs_device()):
samples = input_generator.get( samples = input_generator.get(
dataset, dataset,
...@@ -268,6 +281,22 @@ def main(unused_argv): ...@@ -268,6 +281,22 @@ def main(unused_argv):
for model_var in slim.get_model_variables(): for model_var in slim.get_model_variables():
summaries.add(tf.summary.histogram(model_var.op.name, model_var)) summaries.add(tf.summary.histogram(model_var.op.name, model_var))
# Add summaries for images, labels, semantic predictions
if FLAGS.save_summaries_images:
summary_image = graph.get_tensor_by_name(
('%s/%s:0' % (first_clone_scope, common.IMAGE)).strip('/'))
summaries.add(tf.summary.image('samples/%s' % common.IMAGE, summary_image))
summary_label = tf.cast(graph.get_tensor_by_name(
('%s/%s:0' % (first_clone_scope, common.LABEL)).strip('/')),
tf.uint8)
summaries.add(tf.summary.image('samples/%s' % common.LABEL, summary_label))
predictions = tf.cast(tf.expand_dims(tf.argmax(graph.get_tensor_by_name(
('%s/%s:0' % (first_clone_scope, common.OUTPUT_TYPE)).strip('/')),
3), -1), tf.uint8)
summaries.add(tf.summary.image('samples/%s' % common.OUTPUT_TYPE, predictions))
# Add summaries for losses. # Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope): for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss)) summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment