Commit 337c66ed authored by Neal Wu

Updated summaries in the tutorial models to 1.0

parent 2fd3dcf3
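For reference, here is a minimal sketch of the TensorFlow 1.0 summary API that this commit migrates the tutorials to. TF 1.x is assumed; the constant `loss` tensor and the `/tmp/logs` directory are placeholders for illustration, not code from this commit:

import tensorflow as tf

# Stand-in scalar so the snippet runs on its own (placeholder, not from the commit).
loss = tf.constant(0.5, name='loss')

# 1.0 names: tf.summary.scalar replaces tf.scalar_summary and
# tf.contrib.deprecated.scalar_summary (likewise for histogram/image/merge).
tf.summary.scalar('loss', loss)
summary_op = tf.summary.merge_all()  # replaces tf.merge_all_summaries

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/logs', sess.graph)
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()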
@@ -365,7 +365,7 @@ class Word2Vec(object):
       self._word2id[w] = i
     true_logits, sampled_logits = self.forward(examples, labels)
     loss = self.nce_loss(true_logits, sampled_logits)
-    tf.scalar_summary("NCE loss", loss)
+    tf.summary.scalar("NCE loss", loss)
     self._loss = loss
     self.optimize(loss)
...
@@ -90,8 +90,8 @@ def _activation_summary(x):
   # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
   # session. This helps the clarity of presentation on tensorboard.
   tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
-  tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)
-  tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity',
-                                       tf.nn.zero_fraction(x))
+  tf.summary.histogram(tensor_name + '/activations', x)
+  tf.summary.scalar(tensor_name + '/sparsity',
+                    tf.nn.zero_fraction(x))
@@ -316,8 +316,8 @@ def _add_loss_summaries(total_loss):
   for l in losses + [total_loss]:
     # Name each loss as '(raw)' and name the moving average version of the loss
     # as the original loss name.
-    tf.contrib.deprecated.scalar_summary(l.op.name + ' (raw)', l)
-    tf.contrib.deprecated.scalar_summary(l.op.name, loss_averages.average(l))
+    tf.summary.scalar(l.op.name + ' (raw)', l)
+    tf.summary.scalar(l.op.name, loss_averages.average(l))

   return loss_averages_op
@@ -345,7 +345,7 @@ def train(total_loss, global_step):
                                   decay_steps,
                                   LEARNING_RATE_DECAY_FACTOR,
                                   staircase=True)
-  tf.contrib.deprecated.scalar_summary('learning_rate', lr)
+  tf.summary.scalar('learning_rate', lr)

   # Generate moving averages of all losses and associated summaries.
   loss_averages_op = _add_loss_summaries(total_loss)
@@ -360,12 +360,12 @@ def train(total_loss, global_step):
   # Add histograms for trainable variables.
   for var in tf.trainable_variables():
-    tf.contrib.deprecated.histogram_summary(var.op.name, var)
+    tf.summary.histogram(var.op.name, var)

   # Add histograms for gradients.
   for grad, var in grads:
     if grad is not None:
-      tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients', grad)
+      tf.summary.histogram(var.op.name + '/gradients', grad)

   # Track the moving averages of all trainable variables.
   variable_averages = tf.train.ExponentialMovingAverage(
...
@@ -132,7 +132,7 @@ def _generate_image_and_label_batch(image, label, min_queue_examples,
         capacity=min_queue_examples + 3 * batch_size)

   # Display the training images in the visualizer.
-  tf.contrib.deprecated.image_summary('images', images)
+  tf.summary.image('images', images)

   return images, tf.reshape(label_batch, [batch_size])
...
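The image summary rename above follows the same pattern as the scalar and histogram ops. A small sketch under TF 1.x (the random batch is a placeholder, not code from this commit):

import tensorflow as tf

# Stand-in image batch with shape [batch, height, width, channels].
images = tf.random_uniform([4, 24, 24, 3])

# tf.summary.image replaces tf.contrib.deprecated.image_summary; it renders
# at most max_outputs images from the batch in TensorBoard.
tf.summary.image('images', images, max_outputs=4)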
@@ -93,7 +93,7 @@ def tower_loss(scope):
     # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
     # session. This helps the clarity of presentation on tensorboard.
     loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
-    tf.contrib.deprecated.scalar_summary(loss_name, l)
+    tf.summary.scalar(loss_name, l)

   return total_loss
@@ -187,22 +187,19 @@ def train():
     grads = average_gradients(tower_grads)

     # Add a summary to track the learning rate.
-    summaries.append(tf.contrib.deprecated.scalar_summary('learning_rate', lr))
+    summaries.append(tf.summary.scalar('learning_rate', lr))

     # Add histograms for gradients.
     for grad, var in grads:
       if grad is not None:
-        summaries.append(
-            tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients',
-                                                    grad))
+        summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))

     # Apply the gradients to adjust the shared variables.
     apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

     # Add histograms for trainable variables.
     for var in tf.trainable_variables():
-      summaries.append(
-          tf.contrib.deprecated.histogram_summary(var.op.name, var))
+      summaries.append(tf.summary.histogram(var.op.name, var))

     # Track the moving averages of all trainable variables.
     variable_averages = tf.train.ExponentialMovingAverage(
@@ -216,7 +213,7 @@ def train():
     saver = tf.train.Saver(tf.global_variables())

     # Build the summary operation from the last tower summaries.
-    summary_op = tf.contrib.deprecated.merge_summary(summaries)
+    summary_op = tf.summary.merge(summaries)

     # Build an initialization operation to run below.
     init = tf.global_variables_initializer()
...
@@ -334,14 +334,14 @@ def main(_):
       train_input = PTBInput(config=config, data=train_data, name="TrainInput")
       with tf.variable_scope("Model", reuse=None, initializer=initializer):
         m = PTBModel(is_training=True, config=config, input_=train_input)
-      tf.scalar_summary("Training Loss", m.cost)
-      tf.scalar_summary("Learning Rate", m.lr)
+      tf.summary.scalar("Training Loss", m.cost)
+      tf.summary.scalar("Learning Rate", m.lr)

     with tf.name_scope("Valid"):
       valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
       with tf.variable_scope("Model", reuse=True, initializer=initializer):
         mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
-      tf.scalar_summary("Validation Loss", mvalid.cost)
+      tf.summary.scalar("Validation Loss", mvalid.cost)

     with tf.name_scope("Test"):
       test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
...
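Unlike the other tutorials, the multi-GPU trainer above collects its summary ops in an explicit Python list rather than relying on the default SUMMARIES collection. A minimal sketch of that pattern under TF 1.x (the `lr` and `weights` tensors and the log directory are stand-ins, not code from this commit):

import tensorflow as tf

summaries = []
lr = tf.constant(0.1, name='learning_rate')             # stand-in learning rate
weights = tf.Variable(tf.zeros([10]), name='weights')   # stand-in trainable variable

summaries.append(tf.summary.scalar('learning_rate', lr))
summaries.append(tf.summary.histogram(weights.op.name, weights))

# tf.summary.merge replaces tf.contrib.deprecated.merge_summary for merging
# an explicit list of summary tensors (vs. merge_all for the collection).
summary_op = tf.summary.merge(summaries)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('/tmp/multi_gpu_demo', sess.graph)
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()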