Commit e2ecda2c authored by Neal Wu

Updated the cifar10 model to match the internal version and to be compatible with the latest version of TensorFlow
parent f565b808
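
Most of this diff is a mechanical rename: TensorFlow 1.0 removed the old top-level summary ops (tf.scalar_summary, tf.histogram_summary, tf.image_summary, tf.merge_summary) and preserved them verbatim under tf.contrib.deprecated, while the long-term replacements live in tf.summary. A minimal sketch of the two spellings (assumes TensorFlow 1.x; the tensor and tag names are illustrative):

    import tensorflow as tf

    x = tf.random_normal([10])

    # Renamed-but-identical op, as used throughout this commit:
    tf.contrib.deprecated.scalar_summary('x_mean', tf.reduce_mean(x))

    # Long-term replacement; note it treats the tag as a node name, so
    # characters such as spaces (see the ' (raw)' tags below) are not allowed:
    tf.summary.scalar('x_mean_v2', tf.reduce_mean(x))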
@@ -90,8 +90,8 @@ def _activation_summary(x):
   # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
   # session. This helps the clarity of presentation on tensorboard.
   tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
-  tf.histogram_summary(tensor_name + '/activations', x)
-  tf.scalar_summary(tensor_name + '/sparsity',
-                    tf.nn.zero_fraction(x))
+  tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)
+  tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity',
+                                       tf.nn.zero_fraction(x))
@@ -134,7 +134,7 @@ def _variable_with_weight_decay(name, shape, stddev, wd):
       shape,
       tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
   if wd is not None:
-    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
+    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
     tf.add_to_collection('losses', weight_decay)
   return var
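
tf.mul, tf.sub, and tf.neg were likewise renamed in TensorFlow 1.0, to tf.multiply, tf.subtract, and tf.negative. For context, a sketch of the collection-based weight decay this function implements, assuming TensorFlow 1.x (the shape, stddev, and 0.004 factor are illustrative):

    import tensorflow as tf

    var = tf.get_variable(
        'weights', shape=[384, 192],
        initializer=tf.truncated_normal_initializer(stddev=0.04))
    # Scale the variable's L2 norm and stash it in a graph collection...
    weight_decay = tf.multiply(tf.nn.l2_loss(var), 0.004, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
    # ...so the total loss can later sum cross entropy plus every decay term.
    total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')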
@@ -316,8 +316,8 @@ def _add_loss_summaries(total_loss):
   for l in losses + [total_loss]:
     # Name each loss as '(raw)' and name the moving average version of the loss
     # as the original loss name.
-    tf.scalar_summary(l.op.name + ' (raw)', l)
-    tf.scalar_summary(l.op.name, loss_averages.average(l))
+    tf.contrib.deprecated.scalar_summary(l.op.name + ' (raw)', l)
+    tf.contrib.deprecated.scalar_summary(l.op.name, loss_averages.average(l))

   return loss_averages_op
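
The loss_averages object used above comes from the unchanged top of _add_loss_summaries; a sketch of that half, assuming the tutorial's 0.9 decay:

    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    # Once loss_averages_op has run, loss_averages.average(l) returns the
    # smoothed shadow value that gets the plain l.op.name tag above.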
@@ -345,7 +345,7 @@ def train(total_loss, global_step):
                                   decay_steps,
                                   LEARNING_RATE_DECAY_FACTOR,
                                   staircase=True)
-  tf.scalar_summary('learning_rate', lr)
+  tf.contrib.deprecated.scalar_summary('learning_rate', lr)

   # Generate moving averages of all losses and associated summaries.
   loss_averages_op = _add_loss_summaries(total_loss)
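
The lr being summarized is produced by the unchanged lines just above the hunk, a standard tf.train.exponential_decay schedule; roughly:

    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)
    # staircase=True drops the rate in discrete steps every decay_steps batches
    # instead of decaying it smoothly.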
@@ -360,12 +360,12 @@ def train(total_loss, global_step):
   # Add histograms for trainable variables.
   for var in tf.trainable_variables():
-    tf.histogram_summary(var.op.name, var)
+    tf.contrib.deprecated.histogram_summary(var.op.name, var)

   # Add histograms for gradients.
   for grad, var in grads:
     if grad is not None:
-      tf.histogram_summary(var.op.name + '/gradients', grad)
+      tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients', grad)

   # Track the moving averages of all trainable variables.
   variable_averages = tf.train.ExponentialMovingAverage(
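
The truncated context line continues into the tutorial's moving-average bookkeeping. A sketch of the usual pattern, assuming the module's MOVING_AVERAGE_DECAY constant and an apply_gradient_op returned by the optimizer:

    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # Group the EMA update with the gradient step so one train_op drives both.
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
      train_op = tf.no_op(name='train')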
...
@@ -132,7 +132,7 @@ def _generate_image_and_label_batch(image, label, min_queue_examples,
         capacity=min_queue_examples + 3 * batch_size)

   # Display the training images in the visualizer.
-  tf.image_summary('images', images)
+  tf.contrib.deprecated.image_summary('images', images)

   return images, tf.reshape(label_batch, [batch_size])
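
tf.image_summary gets the same treatment. Its non-deprecated counterpart is tf.summary.image, which caps how many batch elements are rendered:

    # Sketch (TensorFlow 1.x): show at most 3 images from the batch in TensorBoard.
    tf.summary.image('images', images, max_outputs=3)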
...
@@ -93,7 +93,7 @@ def tower_loss(scope):
     # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
     # session. This helps the clarity of presentation on tensorboard.
     loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
-    tf.scalar_summary(loss_name, l)
+    tf.contrib.deprecated.scalar_summary(loss_name, l)

   return total_loss
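
The re.sub call strips the per-GPU tower prefix so every replica reports under a single tag. A quick illustration:

    import re
    TOWER_NAME = 'tower'
    name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', 'tower_0/total_loss')
    print(name)  # -> total_loss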
@@ -187,13 +187,13 @@ def train():
     grads = average_gradients(tower_grads)

     # Add a summary to track the learning rate.
-    summaries.append(tf.scalar_summary('learning_rate', lr))
+    summaries.append(tf.contrib.deprecated.scalar_summary('learning_rate', lr))

     # Add histograms for gradients.
     for grad, var in grads:
       if grad is not None:
         summaries.append(
-            tf.histogram_summary(var.op.name + '/gradients',
-                                 grad))
+            tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients',
+                                                    grad))

     # Apply the gradients to adjust the shared variables.
@@ -202,7 +202,7 @@ def train():
     # Add histograms for trainable variables.
     for var in tf.trainable_variables():
       summaries.append(
-          tf.histogram_summary(var.op.name, var))
+          tf.contrib.deprecated.histogram_summary(var.op.name, var))

     # Track the moving averages of all trainable variables.
     variable_averages = tf.train.ExponentialMovingAverage(
@@ -216,7 +216,7 @@ def train():
     saver = tf.train.Saver(tf.global_variables())

     # Build the summary operation from the last tower summaries.
-    summary_op = tf.merge_summary(summaries)
+    summary_op = tf.contrib.deprecated.merge_summary(summaries)

     # Build an initialization operation to run below.
     init = tf.global_variables_initializer()
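
tf.merge_summary moves to tf.contrib.deprecated.merge_summary here; the non-deprecated equivalent is tf.summary.merge (or tf.summary.merge_all for the default collection). A sketch of the write path, assuming an active sess, a step counter, and an illustrative log directory:

    summary_op = tf.summary.merge(summaries)
    summary_writer = tf.summary.FileWriter('/tmp/cifar10_train', sess.graph)
    summary_str = sess.run(summary_op)
    summary_writer.add_summary(summary_str, step)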