"torchvision/git@developer.sourcefind.cn:OpenDAS/vision.git" did not exist on "662373f6057bb0d39eaf6e5fde3083639ed93af3"
Commit 250debf2 authored by Chris Shallue, committed by GitHub

Merge pull request #881 from cshallue/master

Update im2txt model after changes to TensorFlow API
parents 2fa6057a a46f826f
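The hunks below are mechanical renames that track the updated TensorFlow API; the model logic is unchanged. As a quick reference compiled from the hunks in this diff (not an exhaustive migration list), the replacements are:

tf.all_variables()                ->  tf.global_variables()
tf.initialize_all_variables()     ->  tf.global_variables_initializer()
tf.GraphKeys.VARIABLES            ->  tf.GraphKeys.GLOBAL_VARIABLES
tf.image_summary / tf.scalar_summary / tf.histogram_summary
                                  ->  tf.summary.image / tf.summary.scalar / tf.summary.histogram
tf.sub / tf.mul                   ->  tf.subtract / tf.multiply
tf.concat(axis, values)           ->  tf.concat_v2(values, axis)
tf.split(axis, num, value)        ->  tf.split(value=..., num_or_size_splits=..., axis=...)
tf.nn.rnn_cell.*                  ->  tf.contrib.rnn.*
tf.contrib.losses.*               ->  tf.losses.*
sparse_softmax_cross_entropy_with_logits(logits, targets)
                                  ->  sparse_softmax_cross_entropy_with_logits(labels=..., logits=...)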
@@ -41,7 +41,7 @@ class InceptionV3Test(tf.test.TestCase):
   def _countInceptionParameters(self):
     """Counts the number of parameters in the inception model at top scope."""
     counter = {}
-    for v in tf.all_variables():
+    for v in tf.global_variables():
       name_tokens = v.op.name.split("/")
       if name_tokens[0] == "InceptionV3":
         name = "InceptionV3/" + name_tokens[1]
@@ -85,7 +85,7 @@ class InceptionV3Test(tf.test.TestCase):
     self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())

     self._verifyParameterCounts()
-    self._assertCollectionSize(376, tf.GraphKeys.VARIABLES)
+    self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
     self._assertCollectionSize(188, tf.GraphKeys.TRAINABLE_VARIABLES)
     self._assertCollectionSize(188, tf.GraphKeys.UPDATE_OPS)
     self._assertCollectionSize(94, tf.GraphKeys.REGULARIZATION_LOSSES)
@@ -98,7 +98,7 @@ class InceptionV3Test(tf.test.TestCase):
     self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())

     self._verifyParameterCounts()
-    self._assertCollectionSize(376, tf.GraphKeys.VARIABLES)
+    self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
     self._assertCollectionSize(188, tf.GraphKeys.TRAINABLE_VARIABLES)
     self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS)
     self._assertCollectionSize(94, tf.GraphKeys.REGULARIZATION_LOSSES)
@@ -111,7 +111,7 @@ class InceptionV3Test(tf.test.TestCase):
     self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())

     self._verifyParameterCounts()
-    self._assertCollectionSize(376, tf.GraphKeys.VARIABLES)
+    self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
     self._assertCollectionSize(0, tf.GraphKeys.TRAINABLE_VARIABLES)
     self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS)
     self._assertCollectionSize(0, tf.GraphKeys.REGULARIZATION_LOSSES)
@@ -124,7 +124,7 @@ class InceptionV3Test(tf.test.TestCase):
     self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())

     self._verifyParameterCounts()
-    self._assertCollectionSize(376, tf.GraphKeys.VARIABLES)
+    self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
     self._assertCollectionSize(0, tf.GraphKeys.TRAINABLE_VARIABLES)
     self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS)
     self._assertCollectionSize(0, tf.GraphKeys.REGULARIZATION_LOSSES)
...
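For context on the variable-collection renames above, here is a minimal, self-contained sketch (not part of the commit; the scope name and variable are only illustrations) of how the renamed calls are used:

import tensorflow as tf

with tf.variable_scope("InceptionV3"):
  tf.get_variable("weights", shape=[3, 3])

# tf.global_variables() replaces the deprecated tf.all_variables(), and the
# matching collection key is now tf.GraphKeys.GLOBAL_VARIABLES.
for v in tf.global_variables():
  print(v.op.name)
print(len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)))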
@@ -92,7 +92,7 @@ def process_image(encoded_image,
   # only logged in thread 0.
   def image_summary(name, image):
     if not thread_id:
-      tf.image_summary(name, tf.expand_dims(image, 0))
+      tf.summary.image(name, tf.expand_dims(image, 0))

   # Decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1).
   with tf.name_scope("decode", values=[encoded_image]):
@@ -128,6 +128,6 @@ def process_image(encoded_image,
   image_summary("final_image", image)

   # Rescale to [-1,1] instead of [0, 1]
-  image = tf.sub(image, 0.5)
-  image = tf.mul(image, 2.0)
+  image = tf.subtract(image, 0.5)
+  image = tf.multiply(image, 2.0)
   return image
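The two hunks above only swap op names; the image processing itself is unchanged. A condensed, standalone sketch of that final step (a hypothetical helper, not the full process_image) with the new names:

import tensorflow as tf

def rescale_for_model(image, add_summary=False):
  """Maps a float image in [0, 1) to [-1, 1) using the renamed element-wise ops."""
  if add_summary:
    # tf.summary.image replaces tf.image_summary and expects a 4-D [batch, h, w, c] tensor.
    tf.summary.image("final_image", tf.expand_dims(image, 0))
  image = tf.subtract(image, 0.5)   # formerly tf.sub
  image = tf.multiply(image, 2.0)   # formerly tf.mul
  return image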
@@ -116,7 +116,7 @@ def prefetch_input_data(reader,
     enqueue_ops.append(values_queue.enqueue([value]))
   tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
       values_queue, enqueue_ops))
-  tf.scalar_summary(
+  tf.summary.scalar(
       "queue/%s/fraction_of_%d_full" % (values_queue.name, capacity),
       tf.cast(values_queue.size(), tf.float32) * (1. / capacity))
@@ -181,7 +181,7 @@ def batch_with_dynamic_pad(images_and_captions,
   enqueue_list = []
   for image, caption in images_and_captions:
     caption_length = tf.shape(caption)[0]
-    input_length = tf.expand_dims(tf.sub(caption_length, 1), 0)
+    input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0)

     input_seq = tf.slice(caption, [0], input_length)
     target_seq = tf.slice(caption, [1], input_length)
@@ -197,8 +197,8 @@ def batch_with_dynamic_pad(images_and_captions,
   if add_summaries:
     lengths = tf.add(tf.reduce_sum(mask, 1), 1)
-    tf.scalar_summary("caption_length/batch_min", tf.reduce_min(lengths))
-    tf.scalar_summary("caption_length/batch_max", tf.reduce_max(lengths))
-    tf.scalar_summary("caption_length/batch_mean", tf.reduce_mean(lengths))
+    tf.summary.scalar("caption_length/batch_min", tf.reduce_min(lengths))
+    tf.summary.scalar("caption_length/batch_max", tf.reduce_max(lengths))
+    tf.summary.scalar("caption_length/batch_mean", tf.reduce_mean(lengths))

   return images, input_seqs, target_seqs, mask
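As above, batch_with_dynamic_pad keeps its behaviour and only picks up the renamed ops. A small sketch (a hypothetical standalone function, not from the commit) of the caption-splitting step that now uses tf.subtract:

import tensorflow as tf

def split_caption(caption):
  """Splits a 1-D caption tensor into (input_seq, target_seq), shifted by one token."""
  caption_length = tf.shape(caption)[0]
  input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0)  # formerly tf.sub
  input_seq = tf.slice(caption, [0], input_length)   # all tokens except the last
  target_seq = tf.slice(caption, [1], input_length)  # all tokens except the first
  return input_seq, target_seq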
@@ -244,10 +244,10 @@ class ShowAndTellModel(object):
     # This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the
     # modified LSTM in the "Show and Tell" paper has no biases and outputs
     # new_c * sigmoid(o).
-    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
+    lstm_cell = tf.contrib.rnn.BasicLSTMCell(
         num_units=self.config.num_lstm_units, state_is_tuple=True)
     if self.mode == "train":
-      lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
+      lstm_cell = tf.contrib.rnn.DropoutWrapper(
           lstm_cell,
           input_keep_prob=self.config.lstm_dropout_keep_prob,
           output_keep_prob=self.config.lstm_dropout_keep_prob)
@@ -264,13 +264,13 @@ class ShowAndTellModel(object):
       if self.mode == "inference":
         # In inference mode, use concatenated states for convenient feeding and
         # fetching.
-        tf.concat(1, initial_state, name="initial_state")
+        tf.concat_v2(initial_state, 1, name="initial_state")

         # Placeholder for feeding a batch of concatenated states.
         state_feed = tf.placeholder(dtype=tf.float32,
                                     shape=[None, sum(lstm_cell.state_size)],
                                     name="state_feed")
-        state_tuple = tf.split(1, 2, state_feed)
+        state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)

         # Run a single LSTM step.
         lstm_outputs, state_tuple = lstm_cell(
@@ -278,7 +278,7 @@ class ShowAndTellModel(object):
             state=state_tuple)

         # Concatentate the resulting state.
-        tf.concat(1, state_tuple, name="state")
+        tf.concat_v2(state_tuple, 1, name="state")
       else:
         # Run the batch of sequence embeddings through the LSTM.
         sequence_length = tf.reduce_sum(self.input_mask, 1)
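Note the argument-order changes in the two hunks above: the old tf.concat took the axis first (tf.concat(1, values)), while tf.concat_v2 takes the values first; likewise the old tf.split(1, 2, state_feed) becomes a keyword call with the axis last. A tiny sketch (toy constant, not the model's state tensors) of the new tf.split signature used to recover the two LSTM state halves:

import tensorflow as tf

x = tf.constant([[1., 2., 3., 4.],
                 [5., 6., 7., 8.]])
# Old API: tf.split(1, 2, x). New API: keywords, with the axis last.
c, m = tf.split(value=x, num_or_size_splits=2, axis=1)

with tf.Session() as sess:
  print(sess.run([c, m]))  # two [2, 2] halves, analogous to the (c, m) state pair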
@@ -307,18 +307,19 @@ class ShowAndTellModel(object):
       weights = tf.to_float(tf.reshape(self.input_mask, [-1]))

       # Compute losses.
-      losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, targets)
-      batch_loss = tf.div(tf.reduce_sum(tf.mul(losses, weights)),
+      losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
+                                                              logits=logits)
+      batch_loss = tf.div(tf.reduce_sum(tf.multiply(losses, weights)),
                           tf.reduce_sum(weights),
                           name="batch_loss")
-      tf.contrib.losses.add_loss(batch_loss)
-      total_loss = tf.contrib.losses.get_total_loss()
+      tf.losses.add_loss(batch_loss)
+      total_loss = tf.losses.get_total_loss()

       # Add summaries.
-      tf.scalar_summary("batch_loss", batch_loss)
-      tf.scalar_summary("total_loss", total_loss)
+      tf.summary.scalar("losses/batch_loss", batch_loss)
+      tf.summary.scalar("losses/total_loss", total_loss)
       for var in tf.trainable_variables():
-        tf.histogram_summary(var.op.name, var)
+        tf.summary.histogram("parameters/" + var.op.name, var)

       self.total_loss = total_loss
       self.target_cross_entropy_losses = losses  # Used in evaluation.
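Beyond the renames, the cross-entropy call above now passes labels= and logits= by keyword, and batch_loss remains the mask-weighted mean of the per-token losses. A self-contained sketch with toy shapes (4 unrolled steps, vocabulary of 3; none of these tensors come from the model):

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1],
                      [0.2, 1.5, 0.3],
                      [0.1, 0.2, 2.2],
                      [1.0, 1.0, 1.0]])
targets = tf.constant([0, 1, 2, 0])
weights = tf.constant([1.0, 1.0, 1.0, 0.0])  # padded positions get weight 0

losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
                                                        logits=logits)
batch_loss = tf.div(tf.reduce_sum(tf.multiply(losses, weights)),
                    tf.reduce_sum(weights),
                    name="batch_loss")

with tf.Session() as sess:
  print(sess.run(batch_loss))  # average cross-entropy over the unpadded steps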
...
@@ -63,7 +63,7 @@ class ShowAndTellModelTest(tf.test.TestCase):
   def _countModelParameters(self):
     """Counts the number of parameters in the model at top level scope."""
     counter = {}
-    for v in tf.all_variables():
+    for v in tf.global_variables():
       name = v.op.name.split("/")[0]
       num_params = v.get_shape().num_elements()
       assert num_params
@@ -98,7 +98,7 @@ class ShowAndTellModelTest(tf.test.TestCase):
     fetches = expected_shapes.keys()

     with self.test_session() as sess:
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
       outputs = sess.run(fetches, feed_dict)

     for index, output in enumerate(outputs):
...
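The test change above mirrors the rename of the global initializer. A minimal sketch (toy variable, not from the test):

import tensorflow as tf

v = tf.Variable(tf.zeros([2, 2]), name="v")

with tf.Session() as sess:
  # tf.global_variables_initializer() replaces tf.initialize_all_variables().
  sess.run(tf.global_variables_initializer())
  print(sess.run(v))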