Commit 052e5e8b authored by Neal Wu's avatar Neal Wu
Browse files

Converted the models repo to TF 1.0 using the upgrade script

parent f21c4278
......@@ -158,7 +158,7 @@ def inception_v3_base(inputs,
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
......@@ -182,7 +182,7 @@ def inception_v3_base(inputs,
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
......@@ -205,7 +205,7 @@ def inception_v3_base(inputs,
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
......@@ -224,7 +224,7 @@ def inception_v3_base(inputs,
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(3, [branch_0, branch_1, branch_2])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
......@@ -253,7 +253,7 @@ def inception_v3_base(inputs,
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
......@@ -282,7 +282,7 @@ def inception_v3_base(inputs,
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_6: 17 x 17 x 768.
......@@ -310,7 +310,7 @@ def inception_v3_base(inputs,
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
......@@ -339,7 +339,7 @@ def inception_v3_base(inputs,
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
......@@ -361,7 +361,7 @@ def inception_v3_base(inputs,
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(3, [branch_0, branch_1, branch_2])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_9: 8 x 8 x 2048.
......@@ -371,21 +371,21 @@ def inception_v3_base(inputs,
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(3, [
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(3, [
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
......@@ -396,21 +396,21 @@ def inception_v3_base(inputs,
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(3, [
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(3, [
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
......
......@@ -49,7 +49,7 @@ def block_inception_a(inputs, scope=None, reuse=None):
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def block_reduction_a(inputs, scope=None, reuse=None):
......@@ -69,7 +69,7 @@ def block_reduction_a(inputs, scope=None, reuse=None):
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat(3, [branch_0, branch_1, branch_2])
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
def block_inception_b(inputs, scope=None, reuse=None):
......@@ -93,7 +93,7 @@ def block_inception_b(inputs, scope=None, reuse=None):
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def block_reduction_b(inputs, scope=None, reuse=None):
......@@ -115,7 +115,7 @@ def block_reduction_b(inputs, scope=None, reuse=None):
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat(3, [branch_0, branch_1, branch_2])
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
def block_inception_c(inputs, scope=None, reuse=None):
......@@ -128,20 +128,20 @@ def block_inception_c(inputs, scope=None, reuse=None):
branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(3, [
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
branch_2 = tf.concat(3, [
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
......@@ -192,7 +192,7 @@ def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',
scope='Conv2d_0a_3x3')
net = tf.concat(3, [branch_0, branch_1])
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_3a', net): return net, end_points
# 73 x 73 x 160
......@@ -207,7 +207,7 @@ def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID',
scope='Conv2d_1a_3x3')
net = tf.concat(3, [branch_0, branch_1])
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_4a', net): return net, end_points
# 71 x 71 x 192
......@@ -218,7 +218,7 @@ def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
with tf.variable_scope('Branch_1'):
branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(3, [branch_0, branch_1])
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_5a', net): return net, end_points
# 35 x 35 x 384
......
......@@ -41,7 +41,7 @@ def overfeat_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.zeros_initializer):
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME'):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
......@@ -107,7 +107,7 @@ def overfeat(inputs,
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer,
biases_initializer=tf.zeros_initializer(),
scope='fc8')
# Convert end_points_collection into a end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
......
......@@ -58,7 +58,7 @@ def vgg_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.zeros_initializer):
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
return arg_sc
......
......@@ -45,7 +45,7 @@ def preprocess_for_train(image,
Returns:
A preprocessed image.
"""
tf.image_summary('image', tf.expand_dims(image, 0))
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
......@@ -58,7 +58,7 @@ def preprocess_for_train(image,
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
tf.image_summary('distorted_image', tf.expand_dims(distorted_image, 0))
tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0))
# Because these operations are not commutative, consider randomizing
# the order their operation.
......@@ -67,7 +67,7 @@ def preprocess_for_train(image,
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_whitening(distorted_image)
return tf.image.per_image_standardization(distorted_image)
def preprocess_for_eval(image, output_height, output_width):
......@@ -81,7 +81,7 @@ def preprocess_for_eval(image, output_height, output_width):
Returns:
A preprocessed image.
"""
tf.image_summary('image', tf.expand_dims(image, 0))
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
......@@ -89,10 +89,10 @@ def preprocess_for_eval(image, output_height, output_width):
resized_image = tf.image.resize_image_with_crop_or_pad(image,
output_width,
output_height)
tf.image_summary('resized_image', tf.expand_dims(resized_image, 0))
tf.summary.image('resized_image', tf.expand_dims(resized_image, 0))
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_whitening(resized_image)
return tf.image.per_image_standardization(resized_image)
def preprocess_image(image, output_height, output_width, is_training=False):
......
......@@ -192,7 +192,7 @@ def preprocess_for_train(image, height, width, bbox,
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
tf.image_summary('image_with_bounding_boxes', image_with_box)
tf.summary.image('image_with_bounding_boxes', image_with_box)
distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
# Restore the shape since the dynamic slice based upon the bbox_size loses
......@@ -200,7 +200,7 @@ def preprocess_for_train(image, height, width, bbox,
distorted_image.set_shape([None, None, 3])
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
tf.image_summary('images_with_distorted_bounding_box',
tf.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
# This resizing operation may distort the images because the aspect
......@@ -215,7 +215,7 @@ def preprocess_for_train(image, height, width, bbox,
lambda x, method: tf.image.resize_images(x, [height, width], method=method),
num_cases=num_resize_cases)
tf.image_summary('cropped_resized_image',
tf.summary.image('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
......@@ -227,10 +227,10 @@ def preprocess_for_train(image, height, width, bbox,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=4)
tf.image_summary('final_distorted_image',
tf.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
distorted_image = tf.sub(distorted_image, 0.5)
distorted_image = tf.mul(distorted_image, 2.0)
distorted_image = tf.subtract(distorted_image, 0.5)
distorted_image = tf.multiply(distorted_image, 2.0)
return distorted_image
......@@ -270,8 +270,8 @@ def preprocess_for_eval(image, height, width,
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
image = tf.sub(image, 0.5)
image = tf.mul(image, 2.0)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
......
......@@ -39,6 +39,6 @@ def preprocess_image(image, output_height, output_width, is_training):
image = tf.to_float(image)
image = tf.image.resize_image_with_crop_or_pad(
image, output_width, output_height)
image = tf.sub(image, 128.0)
image = tf.subtract(image, 128.0)
image = tf.div(image, 128.0)
return image
......@@ -73,7 +73,7 @@ def _crop(image, offset_height, offset_width, crop_height, crop_width):
['Rank of image must be equal to 3.'])
cropped_shape = control_flow_ops.with_dependencies(
[rank_assertion],
tf.pack([crop_height, crop_width, original_shape[2]]))
tf.stack([crop_height, crop_width, original_shape[2]]))
size_assertion = tf.Assert(
tf.logical_and(
......@@ -81,7 +81,7 @@ def _crop(image, offset_height, offset_width, crop_height, crop_width):
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.pack([offset_height, offset_width, 0]))
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
......@@ -227,10 +227,10 @@ def _mean_image_subtraction(image, means):
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
channels = tf.split(2, num_channels, image)
channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
for i in range(num_channels):
channels[i] -= means[i]
return tf.concat(2, channels)
return tf.concat(axis=2, values=channels)
def _smallest_size_at_least(height, width, smallest_side):
......
......@@ -316,8 +316,8 @@ def _configure_optimizer(learning_rate):
def _add_variables_summaries(learning_rate):
summaries = []
for variable in slim.get_model_variables():
summaries.append(tf.histogram_summary(variable.op.name, variable))
summaries.append(tf.scalar_summary('training/Learning Rate', learning_rate))
summaries.append(tf.summary.histogram(variable.op.name, variable))
summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
return summaries
......@@ -489,17 +489,17 @@ def main(_):
end_points = clones[0].outputs
for end_point in end_points:
x = end_points[end_point]
summaries.add(tf.histogram_summary('activations/' + end_point, x))
summaries.add(tf.scalar_summary('sparsity/' + end_point,
summaries.add(tf.summary.histogram('activations/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity/' + end_point,
tf.nn.zero_fraction(x)))
# Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.scalar_summary('losses/%s' % loss.op.name, loss))
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.histogram_summary(variable.op.name, variable))
summaries.add(tf.summary.histogram(variable.op.name, variable))
#################################
# Configure the moving averages #
......@@ -517,7 +517,7 @@ def main(_):
with tf.device(deploy_config.optimizer_device()):
learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
optimizer = _configure_optimizer(learning_rate)
summaries.add(tf.scalar_summary('learning_rate', learning_rate,
summaries.add(tf.summary.scalar('learning_rate', learning_rate,
name='learning_rate'))
if FLAGS.sync_replicas:
......@@ -543,7 +543,7 @@ def main(_):
optimizer,
var_list=variables_to_train)
# Add total_loss to summary.
summaries.add(tf.scalar_summary('total_loss', total_loss,
summaries.add(tf.summary.scalar('total_loss', total_loss,
name='total_loss'))
# Create gradient updates.
......@@ -561,7 +561,7 @@ def main(_):
first_clone_scope))
# Merge all summaries together.
summary_op = tf.merge_summary(list(summaries), name='summary_op')
summary_op = tf.summary.merge(list(summaries), name='summary_op')
###########################
......
......@@ -92,7 +92,7 @@ def rnn_helper(inp,
elif direction == "backward":
out = backward
else:
out = tf.concat(2, [forward, backward])
out = tf.concat(axis=2, values=[forward, backward])
return out
......@@ -183,7 +183,7 @@ def lstm_layer(inp,
with tf.variable_scope(name):
if backward:
if length is None:
inp = tf.reverse(inp, [False, True, False])
inp = tf.reverse(inp, [1])
else:
inp = tf.reverse_sequence(inp, length, 1, 0)
......@@ -217,14 +217,14 @@ def lstm_layer(inp,
batch_size = shapes.tensor_dim(inp, dim=0)
num_frames = shapes.tensor_dim(inp, dim=1)
prev = tf.reshape(inp, tf.pack([batch_size * num_frames, num_prev]))
prev = tf.reshape(inp, tf.stack([batch_size * num_frames, num_prev]))
if use_native_weights:
with tf.variable_scope("LSTMCell"):
b = tf.get_variable(
"B",
shape=[4 * num_nodes],
initializer=tf.zeros_initializer,
initializer=tf.zeros_initializer(),
dtype=tf.float32)
biases = tf.identity(b, name="biases")
else:
......@@ -236,17 +236,17 @@ def lstm_layer(inp,
biases, name="biases_reg"))
prev = tf.nn.xw_plus_b(prev, w_i_m, biases)
prev = tf.reshape(prev, tf.pack([batch_size, num_frames, 4, num_nodes]))
prev = tf.reshape(prev, tf.stack([batch_size, num_frames, 4, num_nodes]))
if state is None:
state = tf.fill(tf.pack([batch_size, num_nodes]), 0.0)
state = tf.fill(tf.stack([batch_size, num_nodes]), 0.0)
if memory is None:
memory = tf.fill(tf.pack([batch_size, num_nodes]), 0.0)
memory = tf.fill(tf.stack([batch_size, num_nodes]), 0.0)
out, _, mem = rnn.variable_lstm(prev, state, memory, w_m_m, clip=clip)
if backward:
if length is None:
out = tf.reverse(out, [False, True, False])
out = tf.reverse(out, [1])
else:
out = tf.reverse_sequence(out, length, 1, 0)
......
......@@ -79,7 +79,7 @@ def ImageInput(input_pattern, num_threads, shape, using_ctc, reader=None):
# Give the images a nice name as well.
images = tf.identity(images, name='Images')
tf.image_summary('Images', images)
tf.summary.image('Images', images)
return images, heights, widths, labels, sparse_labels, truths
......@@ -145,6 +145,6 @@ def _ImageProcessing(image_buffer, shape):
image = tf.image.decode_png(image_buffer, channels=shape.depth)
image.set_shape([shape.height, shape.width, shape.depth])
image = tf.cast(image, tf.float32)
image = tf.sub(image, 128.0)
image = tf.mul(image, 1 / 100.0)
image = tf.subtract(image, 128.0)
image = tf.multiply(image, 1 / 100.0)
return image
......@@ -147,7 +147,7 @@ def Eval(train_dir,
sequence_error=None)
with tf.Graph().as_default():
model = InitNetwork(eval_data, model_str, 'eval', reader=reader)
sw = tf.train.SummaryWriter(eval_dir)
sw = tf.summary.FileWriter(eval_dir)
while True:
sess = tf.Session('')
......@@ -369,7 +369,7 @@ class VGSLImageModel(object):
if self.mode == 'train':
# Setup loss for training.
self.loss = self._AddLossFunction(logits, height_in, out_dims, out_func)
tf.scalar_summary('loss', self.loss, name='loss')
tf.summary.scalar('loss', self.loss)
elif out_dims == 0:
# Be sure the labels match the output, even in eval mode.
self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
......@@ -484,7 +484,7 @@ class VGSLImageModel(object):
opt = tf.train.AdamOptimizer(learning_rate=learn_rate_dec)
else:
raise ValueError('Invalid optimizer type: ' + optimizer_type)
tf.scalar_summary('learn_rate', learn_rate_dec, name='lr_summ')
tf.summary.scalar('learn_rate', learn_rate_dec)
self.train_op = opt.minimize(
self.loss, global_step=self.global_step, name='train')
......
......@@ -149,7 +149,7 @@ class VGSLSpecs(object):
else:
lengths = tf.ones_like(lengths)
if factor != 1:
lengths = tf.mul(lengths, tf.cast(factor, tf.float32))
lengths = tf.multiply(lengths, tf.cast(factor, tf.float32))
return tf.cast(lengths, tf.int32)
def BuildFromString(self, prev_layer, index):
......@@ -235,7 +235,7 @@ class VGSLSpecs(object):
final_factors = self.reduction_factors
if index == len(self.model_str):
raise ValueError('Missing ) at end of parallel!' + self.model_str)
return tf.concat(num_dims - 1, layers), index + 1
return tf.concat(axis=num_dims - 1, values=layers), index + 1
def AddConvLayer(self, prev_layer, index):
"""Add a single standard convolutional layer.
......@@ -342,7 +342,7 @@ class VGSLSpecs(object):
factor1 = tf.cast(self.reduction_factors[i], tf.float32)
factor2 = tf.cast(prev_shape[i], tf.float32)
divisor = tf.cast(result_shape[i], tf.float32)
self.reduction_factors[i] = tf.div(tf.mul(factor1, factor2), divisor)
self.reduction_factors[i] = tf.div(tf.multiply(factor1, factor2), divisor)
return layer, m.end()
def AddFCLayer(self, prev_layer, index):
......@@ -401,7 +401,7 @@ class VGSLSpecs(object):
name + '_forward')
back = self._LSTMLayer(prev_layer, 'backward', dim, True, depth,
name + '_reverse')
return tf.concat(3, [fwd, back], name=name + '_concat'), m.end()
return tf.concat(axis=3, values=[fwd, back], name=name + '_concat'), m.end()
if direction == 'f':
direction = 'forward'
elif direction == 'r':
......
File mode changed from 100755 to 100644
File mode changed from 100755 to 100644
File mode changed from 100755 to 100644
......@@ -135,8 +135,8 @@ def count_matrix_input(filenames, submatrix_rows, submatrix_cols):
sparse_local_col = features['sparse_local_col'].values
sparse_count = features['sparse_value'].values
sparse_indices = tf.concat([tf.expand_dims(sparse_local_row, 1),
tf.expand_dims(sparse_local_col, 1)], 1)
sparse_indices = tf.concat(axis=1, values=[tf.expand_dims(sparse_local_row, 1),
tf.expand_dims(sparse_local_col, 1)])
count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],
sparse_count)
......
File mode changed from 100755 to 100644
File mode changed from 100755 to 100644
......@@ -69,7 +69,7 @@ def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
if allow_weights:
# Multiply by weights, reshaping to allow broadcast.
broadcast_weights_shape = tf.concat([tf.shape(weights), [1]], 0)
broadcast_weights_shape = tf.concat(axis=0, values=[tf.shape(weights), [1]])
embeddings *= tf.reshape(weights, broadcast_weights_shape)
# Sum embeddings by index.
......@@ -330,7 +330,7 @@ class GreedyParser(object):
i,
return_average=return_average))
last_layer = tf.concat(embeddings, 1)
last_layer = tf.concat(axis=1, values=embeddings)
last_layer_size = self.embedding_size
# Create ReLU layers.
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment