"benchmark/vscode:/vscode.git/clone" did not exist on "50b495f3d82097a6ed5f6138f92b6006e5471884"
Commit 705acc35 authored by Christopher Shallue

Replace deprecated functions

parent f653bd23
@@ -128,6 +128,6 @@ def process_image(encoded_image,
   image_summary("final_image", image)
   # Rescale to [-1,1] instead of [0, 1]
-  image = tf.sub(image, 0.5)
-  image = tf.mul(image, 2.0)
+  image = tf.subtract(image, 0.5)
+  image = tf.multiply(image, 2.0)
   return image
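
In TensorFlow 1.0 the elementwise arithmetic ops were renamed (tf.sub became tf.subtract, tf.mul became tf.multiply) with unchanged semantics. A minimal sketch of the rescaling step in isolation, assuming TF 1.x and using a random tensor as a hypothetical stand-in for the decoded image:

import tensorflow as tf

# Hypothetical stand-in for a decoded image with values in [0, 1].
image = tf.random_uniform([224, 224, 3], minval=0.0, maxval=1.0)

# Rescale from [0, 1] to [-1, 1]: shift by the midpoint, then double.
image = tf.subtract(image, 0.5)   # was tf.sub
image = tf.multiply(image, 2.0)   # was tf.mul

with tf.Session() as sess:
  print(sess.run([tf.reduce_min(image), tf.reduce_max(image)]))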
@@ -181,7 +181,7 @@ def batch_with_dynamic_pad(images_and_captions,
   enqueue_list = []
   for image, caption in images_and_captions:
     caption_length = tf.shape(caption)[0]
-    input_length = tf.expand_dims(tf.sub(caption_length, 1), 0)
+    input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0)
     input_seq = tf.slice(caption, [0], input_length)
     target_seq = tf.slice(caption, [1], input_length)
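
The rename applies to integer tensors as well: caption_length - 1 is the shared length of the input sequence (the caption minus its last word) and the target sequence (the caption shifted left by one). A sketch with a hypothetical toy caption, assuming TF 1.x:

import tensorflow as tf

caption = tf.constant([0, 4, 7, 9, 1])  # hypothetical ids: <start> w1 w2 w3 <end>
caption_length = tf.shape(caption)[0]
input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0)  # was tf.sub

input_seq = tf.slice(caption, [0], input_length)   # [0, 4, 7, 9]
target_seq = tf.slice(caption, [1], input_length)  # [4, 7, 9, 1]

with tf.Session() as sess:
  print(sess.run([input_seq, target_seq]))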
@@ -244,10 +244,10 @@ class ShowAndTellModel(object):
     # This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the
     # modified LSTM in the "Show and Tell" paper has no biases and outputs
     # new_c * sigmoid(o).
-    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
+    lstm_cell = tf.contrib.rnn.BasicLSTMCell(
         num_units=self.config.num_lstm_units, state_is_tuple=True)
     if self.mode == "train":
-      lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
+      lstm_cell = tf.contrib.rnn.DropoutWrapper(
           lstm_cell,
           input_keep_prob=self.config.lstm_dropout_keep_prob,
           output_keep_prob=self.config.lstm_dropout_keep_prob)
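
In TF 1.0 the RNN cell classes moved from tf.nn.rnn_cell to tf.contrib.rnn; the constructor arguments are unchanged. A minimal sketch of the same cell setup, assuming TF 1.0 and using hypothetical literals in place of self.config:

import tensorflow as tf

num_lstm_units = 512       # hypothetical stand-in for self.config.num_lstm_units
dropout_keep_prob = 0.7    # hypothetical stand-in for self.config.lstm_dropout_keep_prob
mode = "train"

lstm_cell = tf.contrib.rnn.BasicLSTMCell(            # was tf.nn.rnn_cell.BasicLSTMCell
    num_units=num_lstm_units, state_is_tuple=True)
if mode == "train":
  # Dropout is applied to the cell's inputs and outputs; the recurrent
  # state transition itself is left untouched.
  lstm_cell = tf.contrib.rnn.DropoutWrapper(         # was tf.nn.rnn_cell.DropoutWrapper
      lstm_cell,
      input_keep_prob=dropout_keep_prob,
      output_keep_prob=dropout_keep_prob)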
@@ -264,13 +264,13 @@ class ShowAndTellModel(object):
       if self.mode == "inference":
         # In inference mode, use concatenated states for convenient feeding and
         # fetching.
-        tf.concat(1, initial_state, name="initial_state")
+        tf.concat_v2(initial_state, 1, name="initial_state")
         # Placeholder for feeding a batch of concatenated states.
         state_feed = tf.placeholder(dtype=tf.float32,
                                     shape=[None, sum(lstm_cell.state_size)],
                                     name="state_feed")
-        state_tuple = tf.split(1, 2, state_feed)
+        state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)
         # Run a single LSTM step.
         lstm_outputs, state_tuple = lstm_cell(
@@ -278,7 +278,7 @@ class ShowAndTellModel(object):
             state=state_tuple)
         # Concatenate the resulting state.
-        tf.concat(1, state_tuple, name="state")
+        tf.concat_v2(state_tuple, 1, name="state")
       else:
         # Run the batch of sequence embeddings through the LSTM.
         sequence_length = tf.reduce_sum(self.input_mask, 1)
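
Both hunks track the same signature cleanup: tf.concat_v2 (the TF 0.12 transitional name for what became tf.concat in 1.0) takes the tensor list first and the axis second, reversing the old tf.concat(axis, values) order, and tf.split replaces the positional (axis, num, value) form with keyword arguments. A sketch of packing an LSTM (c, h) state pair into one tensor and splitting it back, assuming the same TF 0.12/1.0 transitional API as the commit and hypothetical shapes:

import tensorflow as tf

batch_size, num_units = 4, 8
c = tf.zeros([batch_size, num_units])  # hypothetical cell state
h = tf.ones([batch_size, num_units])   # hypothetical hidden state

# Pack (c, h) into one [batch, 2 * num_units] tensor along axis 1.
state = tf.concat_v2([c, h], 1, name="state")  # old form: tf.concat(1, [c, h])

# Split back into two equal halves along axis 1.
c2, h2 = tf.split(value=state, num_or_size_splits=2, axis=1)  # old: tf.split(1, 2, state)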
@@ -307,8 +307,9 @@ class ShowAndTellModel(object):
       weights = tf.to_float(tf.reshape(self.input_mask, [-1]))
       # Compute losses.
-      losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, targets)
-      batch_loss = tf.div(tf.reduce_sum(tf.mul(losses, weights)),
+      losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
+                                                              logits=logits)
+      batch_loss = tf.div(tf.reduce_sum(tf.multiply(losses, weights)),
                           tf.reduce_sum(weights),
                           name="batch_loss")
       tf.losses.add_loss(batch_loss)
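
TF 1.0 made the cross-entropy losses keyword-only (labels=..., logits=...) because the old positional order was easy to swap silently; tf.mul again becomes tf.multiply. Dividing by the mask sum averages the loss over real timesteps only. A sketch with hypothetical toy values, assuming TF 1.x:

import tensorflow as tf

logits = tf.random_normal([6, 10])               # hypothetical: 6 steps, vocab of 10
targets = tf.constant([3, 1, 4, 1, 5, 9])        # hypothetical target word ids
weights = tf.constant([1., 1., 1., 1., 0., 0.])  # mask: last two steps are padding

losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
                                                        logits=logits)
# Mask-weighted mean: padded positions contribute nothing.
batch_loss = tf.div(tf.reduce_sum(tf.multiply(losses, weights)),
                    tf.reduce_sum(weights),
                    name="batch_loss")

with tf.Session() as sess:
  print(sess.run(batch_loss))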