Commit b41ff7f1 authored by Neal Wu's avatar Neal Wu
Browse files

Remove name arguments from tf.summary.scalar

parent 1fe7f454
...@@ -232,11 +232,9 @@ def _gather_clone_loss(clone, num_clones, regularization_losses): ...@@ -232,11 +232,9 @@ def _gather_clone_loss(clone, num_clones, regularization_losses):
sum_loss = tf.add_n(all_losses) sum_loss = tf.add_n(all_losses)
# Add the summaries out of the clone device block. # Add the summaries out of the clone device block.
if clone_loss is not None: if clone_loss is not None:
tf.summary.scalar(clone.scope + '/clone_loss', clone_loss, tf.summary.scalar(clone.scope + '/clone_loss', clone_loss)
name='clone_loss')
if regularization_loss is not None: if regularization_loss is not None:
tf.summary.scalar('regularization_loss', regularization_loss, tf.summary.scalar('regularization_loss', regularization_loss)
name='regularization_loss')
return sum_loss return sum_loss
...@@ -404,8 +402,7 @@ def deploy(config, ...@@ -404,8 +402,7 @@ def deploy(config,
if total_loss is not None: if total_loss is not None:
# Add total_loss to summary. # Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss, summaries.add(tf.summary.scalar('total_loss', total_loss))
name='total_loss'))
if summaries: if summaries:
# Merge all summaries together. # Merge all summaries together.
......
...@@ -517,8 +517,7 @@ def main(_): ...@@ -517,8 +517,7 @@ def main(_):
with tf.device(deploy_config.optimizer_device()): with tf.device(deploy_config.optimizer_device()):
learning_rate = _configure_learning_rate(dataset.num_samples, global_step) learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
optimizer = _configure_optimizer(learning_rate) optimizer = _configure_optimizer(learning_rate)
summaries.add(tf.summary.scalar('learning_rate', learning_rate, summaries.add(tf.summary.scalar('learning_rate', learning_rate))
name='learning_rate'))
if FLAGS.sync_replicas: if FLAGS.sync_replicas:
# If sync_replicas is enabled, the averaging will be done in the chief # If sync_replicas is enabled, the averaging will be done in the chief
...@@ -543,8 +542,7 @@ def main(_): ...@@ -543,8 +542,7 @@ def main(_):
optimizer, optimizer,
var_list=variables_to_train) var_list=variables_to_train)
# Add total_loss to summary. # Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss, summaries.add(tf.summary.scalar('total_loss', total_loss))
name='total_loss'))
# Create gradient updates. # Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients, grad_updates = optimizer.apply_gradients(clones_gradients,
......
...@@ -369,7 +369,7 @@ class VGSLImageModel(object): ...@@ -369,7 +369,7 @@ class VGSLImageModel(object):
if self.mode == 'train': if self.mode == 'train':
# Setup loss for training. # Setup loss for training.
self.loss = self._AddLossFunction(logits, height_in, out_dims, out_func) self.loss = self._AddLossFunction(logits, height_in, out_dims, out_func)
tf.summary.scalar('loss', self.loss, name='loss') tf.summary.scalar('loss', self.loss)
elif out_dims == 0: elif out_dims == 0:
# Be sure the labels match the output, even in eval mode. # Be sure the labels match the output, even in eval mode.
self.labels = tf.slice(self.labels, [0, 0], [-1, 1]) self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
...@@ -484,7 +484,7 @@ class VGSLImageModel(object): ...@@ -484,7 +484,7 @@ class VGSLImageModel(object):
opt = tf.train.AdamOptimizer(learning_rate=learn_rate_dec) opt = tf.train.AdamOptimizer(learning_rate=learn_rate_dec)
else: else:
raise ValueError('Invalid optimizer type: ' + optimizer_type) raise ValueError('Invalid optimizer type: ' + optimizer_type)
tf.summary.scalar('learn_rate', learn_rate_dec, name='lr_summ') tf.summary.scalar('learn_rate', learn_rate_dec)
self.train_op = opt.minimize( self.train_op = opt.minimize(
self.loss, global_step=self.global_step, name='train') self.loss, global_step=self.global_step, name='train')
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment