ModelZoo / ResNet50_tensorflow

Commit 443c0745 authored May 18, 2017 by Neal Wu

Convert control_flow_ops.with_dependencies to tf.control_dependencies

parent 7a2bcdc5
Showing 4 changed files with 22 additions and 27 deletions
    inception/inception/slim/ops_test.py       +6  -5
    slim/deployment/model_deploy.py            +2  -2
    slim/preprocessing/vgg_preprocessing.py    +12 -17
    slim/train_image_classifier.py             +2  -3
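Every hunk below applies the same recipe: the internal helper control_flow_ops.with_dependencies(deps, tensor) becomes the public tf.control_dependencies context manager, and three of the four files can then drop their import from tensorflow.python.ops. A minimal side-by-side sketch of the two forms, with an illustrative variable update as the dependency (TF 1.x graph mode assumed; nothing here is taken from the repository itself):

    import tensorflow as tf
    from tensorflow.python.ops import control_flow_ops

    step = tf.Variable(0, name='step')
    increment = tf.assign_add(step, 1)
    loss = tf.constant(4.0)

    # Old style: internal helper that returns `loss` gated on the increment.
    loss_old = control_flow_ops.with_dependencies([increment], loss)

    # New style: public context manager; any op created inside the block
    # runs only after every op in the dependency list has run.
    with tf.control_dependencies([increment]):
      loss_new = tf.identity(loss)

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(loss_old)     # runs the increment first, then returns 4.0
      sess.run(loss_new)     # same behaviour with the new-style block
      print(sess.run(step))  # 2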
inception/inception/slim/ops_test.py
@@ -21,8 +21,6 @@ from __future__ import print_function
 import numpy as np
 import tensorflow as tf
 
-from tensorflow.python.ops import control_flow_ops
-
 from inception.slim import ops
 from inception.slim import scopes
 from inception.slim import variables
@@ -602,7 +600,8 @@ class BatchNormTest(tf.test.TestCase):
       update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
       with tf.control_dependencies(update_ops):
         barrier = tf.no_op(name='gradient_barrier')
-        output = control_flow_ops.with_dependencies([barrier], output)
+        with tf.control_dependencies([barrier]):
+          output = output
       # Initialize all variables
       sess.run(tf.global_variables_initializer())
       moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
@@ -632,7 +631,8 @@ class BatchNormTest(tf.test.TestCase):
       update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
       with tf.control_dependencies(update_ops):
         barrier = tf.no_op(name='gradient_barrier')
-        output = control_flow_ops.with_dependencies([barrier], output)
+        with tf.control_dependencies([barrier]):
+          output = output
       # Initialize all variables
       sess.run(tf.global_variables_initializer())
       moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
@@ -666,7 +666,8 @@ class BatchNormTest(tf.test.TestCase):
       update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
       with tf.control_dependencies(update_ops):
         barrier = tf.no_op(name='gradient_barrier')
-        output = control_flow_ops.with_dependencies([barrier], output)
+        with tf.control_dependencies([barrier]):
+          output = output
       # Initialize all variables
       sess.run(tf.global_variables_initializer())
       moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
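One behavioural detail is worth keeping in mind when reading the rewritten test lines above: tf.control_dependencies only attaches the dependency to ops created inside the block, and a plain Python rebinding such as output = output creates no new op. A small sketch of the distinction (illustrative tensors, TF 1.x assumed; tf.identity is the usual way to materialize the dependency when one is needed):

    import tensorflow as tf

    output = tf.constant(3.0)
    barrier = tf.no_op(name='gradient_barrier')

    with tf.control_dependencies([barrier]):
      # Created inside the block, so fetching it also runs `barrier`.
      gated = tf.identity(output)

    with tf.control_dependencies([barrier]):
      # A bare Python rebinding adds no op to the graph, so nothing here
      # is actually gated on `barrier`.
      output = output

    with tf.Session() as sess:
      print(sess.run([gated, output]))  # [3.0, 3.0]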
slim/deployment/model_deploy.py
@@ -378,8 +378,8 @@ def deploy(config,
         update_ops.append(grad_updates)
 
       update_op = tf.group(*update_ops)
-      train_op = control_flow_ops.with_dependencies([update_op], total_loss,
-                                                    name='train_op')
+      with tf.control_dependencies([update_op]):
+        train_op = total_loss
     else:
       clones_losses = []
       regularization_losses = tf.get_collection(
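This hunk, and the matching one in slim/train_image_classifier.py below, gates the returned loss tensor on a tf.group of the collected update ops, so a single sess.run of the training tensor also runs every queued update. A self-contained sketch of that wiring under TF 1.x (the counter standing in for the real gradient and batch-norm updates is purely illustrative; tf.identity is used here so the returned tensor itself carries the dependency):

    import tensorflow as tf

    total_loss = tf.constant(1.5, name='total_loss')
    counter = tf.Variable(0, name='update_counter')
    update_ops = [tf.assign_add(counter, 1)]  # stand-in for real updates

    update_op = tf.group(*update_ops)
    with tf.control_dependencies([update_op]):
      # Evaluating train_tensor forces every grouped update to run first.
      train_tensor = tf.identity(total_loss, name='train_op')

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(train_tensor)
      print(sess.run(counter))  # 1 -- the update ran as a side effect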
slim/preprocessing/vgg_preprocessing.py
@@ -34,8 +34,6 @@ from __future__ import print_function
 
 import tensorflow as tf
 
-from tensorflow.python.ops import control_flow_ops
-
 slim = tf.contrib.slim
 
 _R_MEAN = 123.68
@@ -71,9 +69,8 @@ def _crop(image, offset_height, offset_width, crop_height, crop_width):
   rank_assertion = tf.Assert(
       tf.equal(tf.rank(image), 3),
       ['Rank of image must be equal to 3.'])
-  cropped_shape = control_flow_ops.with_dependencies(
-      [rank_assertion],
-      tf.stack([crop_height, crop_width, original_shape[2]]))
+  with tf.control_dependencies([rank_assertion]):
+    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
 
   size_assertion = tf.Assert(
       tf.logical_and(
@@ -85,9 +82,8 @@ def _crop(image, offset_height, offset_width, crop_height, crop_width):
   # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
   # define the crop size.
-  image = control_flow_ops.with_dependencies(
-      [size_assertion],
-      tf.slice(image, offsets, cropped_shape))
+  with tf.control_dependencies([size_assertion]):
+    image = tf.slice(image, offsets, cropped_shape)
   return tf.reshape(image, cropped_shape)
@@ -126,9 +122,8 @@ def _random_crop(image_list, crop_height, crop_width):
          image_list[i].name, 3, image_rank])
     rank_assertions.append(rank_assert)
 
-  image_shape = control_flow_ops.with_dependencies(
-      [rank_assertions[0]],
-      tf.shape(image_list[0]))
+  with tf.control_dependencies([rank_assertions[0]]):
+    image_shape = tf.shape(image_list[0])
   image_height = image_shape[0]
   image_width = image_shape[1]
   crop_size_assert = tf.Assert(
@@ -142,8 +137,8 @@ def _random_crop(image_list, crop_height, crop_width):
   for i in range(1, len(image_list)):
     image = image_list[i]
     asserts.append(rank_assertions[i])
-    shape = control_flow_ops.with_dependencies([rank_assertions[i]],
-                                               tf.shape(image))
+    with tf.control_dependencies([rank_assertions[i]]):
+      shape = tf.shape(image)
     height = shape[0]
     width = shape[1]
@@ -162,10 +157,10 @@ def _random_crop(image_list, crop_height, crop_width):
   # Use tf.random_uniform and not numpy.random.rand as doing the former would
   # generate random numbers at graph eval time, unlike the latter which
   # generates random numbers at graph definition time.
-  max_offset_height = control_flow_ops.with_dependencies(
-      asserts, tf.reshape(image_height - crop_height + 1, []))
-  max_offset_width = control_flow_ops.with_dependencies(
-      asserts, tf.reshape(image_width - crop_width + 1, []))
+  with tf.control_dependencies(asserts):
+    max_offset_height = tf.reshape(image_height - crop_height + 1, [])
+  with tf.control_dependencies(asserts):
+    max_offset_width = tf.reshape(image_width - crop_width + 1, [])
   offset_height = tf.random_uniform(
       [], maxval=max_offset_height, dtype=tf.int32)
   offset_width = tf.random_uniform(
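The preprocessing changes all follow the same validate-then-compute pattern: a tf.Assert is built from the input, and the downstream shape, stack, slice, or reshape op is created inside a tf.control_dependencies block so that evaluating it triggers the check first. A compact sketch of that pattern in isolation (the placeholder and the fed value are illustrative, TF 1.x assumed):

    import tensorflow as tf

    image = tf.placeholder(tf.float32, shape=None, name='image')

    # Runtime check: the image must be a rank-3 tensor (H, W, C).
    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])

    with tf.control_dependencies([rank_assertion]):
      # Created inside the block, so fetching `image_shape` runs the
      # assertion first and fails loudly on bad input.
      image_shape = tf.shape(image)

    with tf.Session() as sess:
      print(sess.run(image_shape,
                     feed_dict={image: [[[0.0], [0.0]], [[0.0], [0.0]]]}))
      # [2 2 1]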
slim/train_image_classifier.py
@@ -20,7 +20,6 @@ from __future__ import print_function
 
 import tensorflow as tf
 
-from tensorflow.python.ops import control_flow_ops
 from datasets import dataset_factory
 from deployment import model_deploy
 from nets import nets_factory
@@ -540,8 +539,8 @@ def main(_):
     update_ops.append(grad_updates)
 
     update_op = tf.group(*update_ops)
-    train_tensor = control_flow_ops.with_dependencies([update_op], total_loss,
-                                                      name='train_op')
+    with tf.control_dependencies([update_op]):
+      train_tensor = total_loss
 
     # Add the summaries from the first clone. These contain the summaries
     # created by model_fn and either optimize_clones() or _gather_clone_loss().