Commit b787cf10 authored by Neal Wu's avatar Neal Wu Committed by GitHub
Browse files

Merge pull request #1487 from tensorflow/control-flow-ops

Convert control_flow_ops.with_dependencies to tf.control_dependencies
parents c9244885 99f9442b
...@@ -21,8 +21,6 @@ from __future__ import print_function ...@@ -21,8 +21,6 @@ from __future__ import print_function
import numpy as np import numpy as np
import tensorflow as tf import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from inception.slim import ops from inception.slim import ops
from inception.slim import scopes from inception.slim import scopes
from inception.slim import variables from inception.slim import variables
...@@ -420,7 +418,7 @@ class DropoutTest(tf.test.TestCase): ...@@ -420,7 +418,7 @@ class DropoutTest(tf.test.TestCase):
with self.test_session(): with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1) images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.dropout(images) output = ops.dropout(images)
self.assertEquals(output.op.name, 'Dropout/dropout/mul_1') self.assertEquals(output.op.name, 'Dropout/dropout/mul')
output.get_shape().assert_is_compatible_with(images.get_shape()) output.get_shape().assert_is_compatible_with(images.get_shape())
def testCreateDropoutNoTraining(self): def testCreateDropoutNoTraining(self):
...@@ -601,8 +599,7 @@ class BatchNormTest(tf.test.TestCase): ...@@ -601,8 +599,7 @@ class BatchNormTest(tf.test.TestCase):
output = ops.batch_norm(images, decay=0.1) output = ops.batch_norm(images, decay=0.1)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops): with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='gradient_barrier') output = tf.identity(output)
output = control_flow_ops.with_dependencies([barrier], output)
# Initialize all variables # Initialize all variables
sess.run(tf.global_variables_initializer()) sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
...@@ -631,8 +628,7 @@ class BatchNormTest(tf.test.TestCase): ...@@ -631,8 +628,7 @@ class BatchNormTest(tf.test.TestCase):
output = ops.batch_norm(images, decay=0.1, is_training=False) output = ops.batch_norm(images, decay=0.1, is_training=False)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops): with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='gradient_barrier') output = tf.identity(output)
output = control_flow_ops.with_dependencies([barrier], output)
# Initialize all variables # Initialize all variables
sess.run(tf.global_variables_initializer()) sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
...@@ -665,8 +661,7 @@ class BatchNormTest(tf.test.TestCase): ...@@ -665,8 +661,7 @@ class BatchNormTest(tf.test.TestCase):
output = ops.batch_norm(images, decay=0.1, is_training=False) output = ops.batch_norm(images, decay=0.1, is_training=False)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops): with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='gradient_barrier') output = tf.identity(output)
output = control_flow_ops.with_dependencies([barrier], output)
# Initialize all variables # Initialize all variables
sess.run(tf.global_variables_initializer()) sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
......
...@@ -378,8 +378,8 @@ def deploy(config, ...@@ -378,8 +378,8 @@ def deploy(config,
update_ops.append(grad_updates) update_ops.append(grad_updates)
update_op = tf.group(*update_ops) update_op = tf.group(*update_ops)
train_op = control_flow_ops.with_dependencies([update_op], total_loss, with tf.control_dependencies([update_op]):
name='train_op') train_op = tf.identity(total_loss, name='train_op')
else: else:
clones_losses = [] clones_losses = []
regularization_losses = tf.get_collection( regularization_losses = tf.get_collection(
......
...@@ -34,8 +34,6 @@ from __future__ import print_function ...@@ -34,8 +34,6 @@ from __future__ import print_function
import tensorflow as tf import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
slim = tf.contrib.slim slim = tf.contrib.slim
_R_MEAN = 123.68 _R_MEAN = 123.68
...@@ -71,9 +69,8 @@ def _crop(image, offset_height, offset_width, crop_height, crop_width): ...@@ -71,9 +69,8 @@ def _crop(image, offset_height, offset_width, crop_height, crop_width):
rank_assertion = tf.Assert( rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3), tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.']) ['Rank of image must be equal to 3.'])
cropped_shape = control_flow_ops.with_dependencies( with tf.control_dependencies([rank_assertion]):
[rank_assertion], cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
tf.stack([crop_height, crop_width, original_shape[2]]))
size_assertion = tf.Assert( size_assertion = tf.Assert(
tf.logical_and( tf.logical_and(
...@@ -85,9 +82,8 @@ def _crop(image, offset_height, offset_width, crop_height, crop_width): ...@@ -85,9 +82,8 @@ def _crop(image, offset_height, offset_width, crop_height, crop_width):
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size. # define the crop size.
image = control_flow_ops.with_dependencies( with tf.control_dependencies([size_assertion]):
[size_assertion], image = tf.slice(image, offsets, cropped_shape)
tf.slice(image, offsets, cropped_shape))
return tf.reshape(image, cropped_shape) return tf.reshape(image, cropped_shape)
...@@ -126,9 +122,8 @@ def _random_crop(image_list, crop_height, crop_width): ...@@ -126,9 +122,8 @@ def _random_crop(image_list, crop_height, crop_width):
image_list[i].name, 3, image_rank]) image_list[i].name, 3, image_rank])
rank_assertions.append(rank_assert) rank_assertions.append(rank_assert)
image_shape = control_flow_ops.with_dependencies( with tf.control_dependencies([rank_assertions[0]]):
[rank_assertions[0]], image_shape = tf.shape(image_list[0])
tf.shape(image_list[0]))
image_height = image_shape[0] image_height = image_shape[0]
image_width = image_shape[1] image_width = image_shape[1]
crop_size_assert = tf.Assert( crop_size_assert = tf.Assert(
...@@ -142,8 +137,8 @@ def _random_crop(image_list, crop_height, crop_width): ...@@ -142,8 +137,8 @@ def _random_crop(image_list, crop_height, crop_width):
for i in range(1, len(image_list)): for i in range(1, len(image_list)):
image = image_list[i] image = image_list[i]
asserts.append(rank_assertions[i]) asserts.append(rank_assertions[i])
shape = control_flow_ops.with_dependencies([rank_assertions[i]], with tf.control_dependencies([rank_assertions[i]]):
tf.shape(image)) shape = tf.shape(image)
height = shape[0] height = shape[0]
width = shape[1] width = shape[1]
...@@ -162,10 +157,10 @@ def _random_crop(image_list, crop_height, crop_width): ...@@ -162,10 +157,10 @@ def _random_crop(image_list, crop_height, crop_width):
# Use tf.random_uniform and not numpy.random.rand as doing the former would # Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which # generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time. # generates random numbers at graph definition time.
max_offset_height = control_flow_ops.with_dependencies( with tf.control_dependencies(asserts):
asserts, tf.reshape(image_height - crop_height + 1, [])) max_offset_height = tf.reshape(image_height - crop_height + 1, [])
max_offset_width = control_flow_ops.with_dependencies( with tf.control_dependencies(asserts):
asserts, tf.reshape(image_width - crop_width + 1, [])) max_offset_width = tf.reshape(image_width - crop_width + 1, [])
offset_height = tf.random_uniform( offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32) [], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform( offset_width = tf.random_uniform(
......
...@@ -20,7 +20,6 @@ from __future__ import print_function ...@@ -20,7 +20,6 @@ from __future__ import print_function
import tensorflow as tf import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from datasets import dataset_factory from datasets import dataset_factory
from deployment import model_deploy from deployment import model_deploy
from nets import nets_factory from nets import nets_factory
...@@ -540,8 +539,8 @@ def main(_): ...@@ -540,8 +539,8 @@ def main(_):
update_ops.append(grad_updates) update_ops.append(grad_updates)
update_op = tf.group(*update_ops) update_op = tf.group(*update_ops)
train_tensor = control_flow_ops.with_dependencies([update_op], total_loss, with tf.control_dependencies([update_op]):
name='train_op') train_tensor = tf.identity(total_loss, name='train_op')
# Add the summaries from the first clone. These contain the summaries # Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss(). # created by model_fn and either optimize_clones() or _gather_clone_loss().
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment