Unverified Commit 451906e4 authored by pkulzc's avatar pkulzc Committed by GitHub
Browse files

Release MobileDet code and model, and require tf_slim installation for OD API. (#8562)



* Merged commit includes the following changes:
311933687  by Sergio Guadarrama:

    Removes spurious use of tf.compat.v2, which results in spurious tf.compat.v1.compat.v2. Adds basic test to nasnet_utils.
    Replaces all remaining import tensorflow as tf with import tensorflow.compat.v1 as tf

--
311766063  by Sergio Guadarrama:

    Removes explicit tf.compat.v1 in all call sites (we already import tf.compat.v1, so this code was doing tf.compat.v1.compat.v1). The existing code worked in the latest version of TensorFlow, 2.2 (and 1.15), but not in 1.14 or 2.0.0a; this CL fixes it.

--
311624958  by Sergio Guadarrama:

    Updates README that doesn't render properly in github documentation

--
310980959  by Sergio Guadarrama:

    Moves research_models/slim off tf.contrib.slim/layers/framework to tf_slim

--
310263156  by Sergio Guadarrama:

    Adds model breakdown for MobilenetV3

--
308640516  by Sergio Guadarrama:

    Internal change

308244396  by Sergio Guadarrama:

    GroupNormalization support for MobilenetV3.

--
307475800  by Sergio Guadarrama:

    Internal change

--
302077708  by Sergio Guadarrama:

    Remove `disable_tf2` behavior from slim py_library targets

--
301208453  by Sergio Guadarrama:

    Automated refactoring to make code Python 3 compatible.

--
300816672  by Sergio Guadarrama:

    Internal change

299433840  by Sergio Guadarrama:

    Internal change

299221609  by Sergio Guadarrama:

    Explicitly disable Tensorflow v2 behaviors for all TF1.x binaries and tests

--
299179617  by Sergio Guadarrama:

    Internal change

299040784  by Sergio Guadarrama:

    Internal change

299036699  by Sergio Guadarrama:

    Internal change

298736510  by Sergio Guadarrama:

    Internal change

298732599  by Sergio Guadarrama:

    Internal change

298729507  by Sergio Guadarrama:

    Internal change

298253328  by Sergio Guadarrama:

    Internal change

297788346  by Sergio Guadarrama:

    Internal change

297785278  by Sergio Guadarrama:

    Internal change

297783127  by Sergio Guadarrama:

    Internal change

297725870  by Sergio Guadarrama:

    Internal change

297721811  by Sergio Guadarrama:

    Internal change

297711347  by Sergio Guadarrama:

    Internal change

297708059  by Sergio Guadarrama:

    Internal change

297701831  by Sergio Guadarrama:

    Internal change

297700038  by Sergio Guadarrama:

    Internal change

297670468  by Sergio Guadarrama:

    Internal change.

--
297350326  by Sergio Guadarrama:

    Explicitly replace "import tensorflow" with "tensorflow.compat.v1" for TF2.x migration

--
297201668  by Sergio Guadarrama:

    Explicitly replace "import tensorflow" with "tensorflow.compat.v1" for TF2.x migration

--
294483372  by Sergio Guadarrama:

    Internal change

PiperOrigin-RevId: 311933687

* Merged commit includes the following changes:
312578615  by Menglong Zhu:

    Modify the LSTM feature extractors to be python 3 compatible.

--
311264357  by Menglong Zhu:

    Removes contrib.slim

--
308957207  by Menglong Zhu:

    Automated refactoring to make code Python 3 compatible.

--
306976470  by yongzhe:

    Internal change

306777559  by Menglong Zhu:

    Internal change

--
299232507  by lzyuan:

    Internal update.

--
299221735  by lzyuan:

    Add small epsilon on max_range for quantize_op to prevent range collapse.

--

PiperOrigin-RevId: 312578615

* Merged commit includes the following changes:
310447280  by lzc:

    Internal changes.

--

PiperOrigin-RevId: 310447280
Co-authored-by: Sergio Guadarrama <sguada@google.com>
Co-authored-by: Menglong Zhu <menglong@google.com>
parent 73b5be67
......@@ -36,7 +36,7 @@ import PIL.ImageFont as ImageFont
import six
from six.moves import range
from six.moves import zip
import tensorflow as tf
import tensorflow.compat.v1 as tf
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
......
......@@ -25,15 +25,28 @@ import numpy as np
import PIL.Image as Image
import six
from six.moves import range
import tensorflow as tf
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.utils import test_case
from object_detection.utils import visualization_utils
_TESTDATA_PATH = 'object_detection/test_images'
class VisualizationUtilsTest(tf.test.TestCase):
def get_iterator_next_for_testing(dataset, is_tf2):
  """Returns a `get_next` tensor for `dataset`, picking an iterator per TF version.

  Args:
    dataset: a tf.data Dataset to draw elements from.
    is_tf2: bool; True when running under TF2 semantics.

  Returns:
    The `get_next()` tensor of the chosen iterator.
  """
  if not is_tf2:
    # TF1 path: a one-shot iterator needs no separate init op to be run.
    return dataset.make_one_shot_iterator().get_next()
  # TF2 path: one-shot iterators do not support lookup tables, but
  # initializable iterators work and their initialization is implicit.
  return dataset.make_initializable_iterator().get_next()
class VisualizationUtilsTest(test_case.TestCase):
def test_get_prime_multiplier_for_color_randomness(self):
# Show that default multiplier is not 1 and does not divide the total number
......@@ -151,13 +164,12 @@ class VisualizationUtilsTest(tf.test.TestCase):
def test_draw_bounding_boxes_on_image_tensors(self):
"""Tests that bounding box utility produces reasonable results."""
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
fname = os.path.join(_TESTDATA_PATH, 'image1.jpg')
image_np = np.array(Image.open(fname))
images_np = np.stack((image_np, image_np), axis=0)
original_image_shape = [[636, 512], [636, 512]]
with tf.Graph().as_default():
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant(original_image_shape, dtype=tf.int32)
boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75], [0.5, 0.3, 0.6, 0.9]],
......@@ -178,22 +190,20 @@ class VisualizationUtilsTest(tf.test.TestCase):
keypoints=keypoints,
min_score_thresh=0.2,
keypoint_edges=keypoint_edges))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
# Write output images for visualization.
images_with_boxes_np = sess.run(images_with_boxes)
self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
self.assertEqual(
tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
logging.info('Writing output image %d to %s', i, output_file)
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
return images_with_boxes
# Write output images for visualization.
images_with_boxes_np = self.execute(graph_fn, [])
self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
self.assertEqual(
tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
logging.info('Writing output image %d to %s', i, output_file)
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
def test_draw_bounding_boxes_on_image_tensors_with_track_ids(self):
"""Tests that bounding box utility produces reasonable results."""
......@@ -204,7 +214,7 @@ class VisualizationUtilsTest(tf.test.TestCase):
images_np = np.stack((image_np, image_np), axis=0)
original_image_shape = [[636, 512], [636, 512]]
with tf.Graph().as_default():
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant(original_image_shape, dtype=tf.int32)
boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75],
......@@ -227,22 +237,20 @@ class VisualizationUtilsTest(tf.test.TestCase):
true_image_shape=image_shape,
track_ids=track_ids,
min_score_thresh=0.2))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
# Write output images for visualization.
images_with_boxes_np = sess.run(images_with_boxes)
self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
self.assertEqual(
tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_with_track_ids_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
logging.info('Writing output image %d to %s', i, output_file)
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
return images_with_boxes
# Write output images for visualization.
images_with_boxes_np = self.execute(graph_fn, [])
self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
self.assertEqual(
tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_with_track_ids_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
logging.info('Writing output image %d to %s', i, output_file)
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
def test_draw_bounding_boxes_on_image_tensors_with_additional_channels(self):
"""Tests the case where input image tensor has more than 3 channels."""
......@@ -250,7 +258,7 @@ class VisualizationUtilsTest(tf.test.TestCase):
image_np = self.create_test_image_with_five_channels()
images_np = np.stack((image_np, image_np), axis=0)
with tf.Graph().as_default():
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
classes = tf.constant(0, dtype=tf.int64, shape=[2, 0])
......@@ -264,11 +272,10 @@ class VisualizationUtilsTest(tf.test.TestCase):
category_index,
min_score_thresh=0.2))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
return images_with_boxes
final_images_np = sess.run(images_with_boxes)
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
final_images_np = self.execute(graph_fn, [])
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
def test_draw_bounding_boxes_on_image_tensors_grayscale(self):
"""Tests the case where input image tensor has one channel."""
......@@ -276,7 +283,7 @@ class VisualizationUtilsTest(tf.test.TestCase):
image_np = self.create_test_grayscale_image()
images_np = np.stack((image_np, image_np), axis=0)
with tf.Graph().as_default():
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant([[100, 200], [100, 200]], dtype=tf.int32)
boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
......@@ -293,11 +300,10 @@ class VisualizationUtilsTest(tf.test.TestCase):
true_image_shape=image_shape,
min_score_thresh=0.2))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
return images_with_boxes
final_images_np = sess.run(images_with_boxes)
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
final_images_np = self.execute(graph_fn, [])
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
def test_draw_keypoints_on_image(self):
test_image = self.create_colorful_test_image()
......@@ -407,7 +413,7 @@ class VisualizationUtilsTest(tf.test.TestCase):
heatmap2 = np.asarray([[0, 1],
[1, 0]], dtype=np.float)
heatmaps = np.stack([heatmap1, heatmap2], axis=0)
with tf.Graph().as_default():
def graph_fn():
image_tensor = tf.constant(test_image, dtype=tf.uint8)
image_tensor = tf.expand_dims(image_tensor, axis=0)
heatmaps_tensor = tf.expand_dims(
......@@ -417,34 +423,38 @@ class VisualizationUtilsTest(tf.test.TestCase):
heatmaps=heatmaps_tensor,
apply_sigmoid=False)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
return output_image
output_image_np = sess.run(output_image)
self.assertAllEqual(
output_image_np,
np.expand_dims(
np.array([[[240, 248, 255], [127, 255, 0]],
[[127, 255, 0], [240, 248, 255]]]),
axis=0))
output_image_np = self.execute(graph_fn, [])
self.assertAllEqual(
output_image_np,
np.expand_dims(
np.array([[[240, 248, 255], [127, 255, 0]],
[[127, 255, 0], [240, 248, 255]]]),
axis=0))
def test_add_cdf_image_summary(self):
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
visualization_utils.add_cdf_image_summary(values, 'PositiveAnchorLoss')
cdf_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
with self.test_session():
cdf_image_summary.eval()
def graph_fn():
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
visualization_utils.add_cdf_image_summary(values, 'PositiveAnchorLoss')
cdf_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
return cdf_image_summary
self.execute(graph_fn, [])
def test_add_hist_image_summary(self):
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
bins = [0.01 * i for i in range(101)]
visualization_utils.add_hist_image_summary(values, bins,
'ScoresDistribution')
hist_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
with self.test_session():
hist_image_summary.eval()
def graph_fn():
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
bins = [0.01 * i for i in range(101)]
visualization_utils.add_hist_image_summary(values, bins,
'ScoresDistribution')
hist_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
return hist_image_summary
self.execute(graph_fn, [])
def test_eval_metric_ops(self):
if self.is_tf2():
self.skipTest('This test is only compatible with Tensorflow 1.X, '
'estimator eval ops are not supported in Tensorflow 2.')
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
max_examples_to_draw = 4
metric_op_base = 'Detections_Left_Groundtruth_Right'
......
......@@ -19,7 +19,7 @@ from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.utils import vrd_evaluation
......
......@@ -37,8 +37,9 @@ sh_binary(
py_binary(
name = "build_imagenet_data",
srcs = ["datasets/build_imagenet_data.py"],
python_version = "PY2",
python_version = "PY3",
deps = [
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//numpy",
"//third_party/py/six",
# "//tensorflow",
......@@ -59,8 +60,10 @@ py_library(
py_library(
name = "download_and_convert_flowers",
srcs = ["datasets/download_and_convert_flowers.py"],
srcs_version = "PY2AND3",
deps = [
":dataset_utils",
"//third_party/py/six",
# "//tensorflow",
],
)
......@@ -79,10 +82,12 @@ py_library(
py_library(
name = "download_and_convert_visualwakewords_lib",
srcs = ["datasets/download_and_convert_visualwakewords_lib.py"],
srcs_version = "PY2AND3",
deps = [
":dataset_utils",
"//third_party/py/PIL:pil",
"//third_party/py/contextlib2",
"//third_party/py/six",
# "//tensorflow",
],
)
......@@ -90,6 +95,7 @@ py_library(
py_library(
name = "download_and_convert_visualwakewords",
srcs = ["datasets/download_and_convert_visualwakewords.py"],
srcs_version = "PY2AND3",
deps = [
":download_and_convert_visualwakewords_lib",
# "//tensorflow",
......@@ -99,12 +105,13 @@ py_library(
py_binary(
name = "download_and_convert_data",
srcs = ["download_and_convert_data.py"],
python_version = "PY2",
python_version = "PY3",
deps = [
":download_and_convert_cifar10",
":download_and_convert_flowers",
":download_and_convert_mnist",
":download_and_convert_visualwakewords",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
],
)
......@@ -115,7 +122,7 @@ py_library(
deps = [
":dataset_utils",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -125,7 +132,7 @@ py_library(
deps = [
":dataset_utils",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -136,7 +143,7 @@ py_library(
":dataset_utils",
"//third_party/py/six",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -146,7 +153,7 @@ py_library(
deps = [
":dataset_utils",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -156,7 +163,7 @@ py_library(
deps = [
":dataset_utils",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -177,22 +184,20 @@ py_library(
srcs = ["deployment/model_deploy.py"],
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_test(
py_test( # py2and3_test
name = "model_deploy_test",
srcs = ["deployment/model_deploy_test.py"],
python_version = "PY2",
srcs_version = "PY2AND3",
deps = [
":model_deploy",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//numpy",
# "//tensorflow",
# "//tensorflow/contrib/framework:framework_py",
# "//tensorflow/contrib/layers:layers_py",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -201,7 +206,7 @@ py_library(
srcs = ["preprocessing/cifarnet_preprocessing.py"],
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -219,7 +224,7 @@ py_library(
srcs = ["preprocessing/lenet_preprocessing.py"],
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -228,7 +233,7 @@ py_library(
srcs = ["preprocessing/vgg_preprocessing.py"],
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -240,7 +245,7 @@ py_library(
":inception_preprocessing",
":lenet_preprocessing",
":vgg_preprocessing",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -273,20 +278,20 @@ py_library(
srcs_version = "PY2AND3",
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_test(
py_test( # py2and3_test
name = "alexnet_test",
size = "medium",
srcs = ["nets/alexnet_test.py"],
python_version = "PY2",
srcs_version = "PY2AND3",
deps = [
":alexnet",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -295,7 +300,7 @@ py_library(
srcs = ["nets/cifarnet.py"],
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -306,20 +311,19 @@ py_library(
# "//numpy",
"//third_party/py/six",
# "//tensorflow",
# "//tensorflow/contrib/framework:framework_py",
# "//tensorflow/contrib/layers:layers_py",
# "//tensorflow/contrib/util:util_py",
"//third_party/py/tf_slim:slim",
# "//tensorflow/python:tensor_util",
],
)
py_test(
py_test( # py2and3_test
name = "cyclegan_test",
srcs = ["nets/cyclegan_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
":cyclegan",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
],
)
......@@ -330,18 +334,18 @@ py_library(
deps = [
"//third_party/py/six",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_test(
py_test( # py2and3_test
name = "dcgan_test",
srcs = ["nets/dcgan_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
":dcgan",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
"//third_party/py/six",
# "//tensorflow",
],
......@@ -355,20 +359,22 @@ py_library(
":i3d_utils",
":s3dg",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_test(
py_test( # py2and3_test
name = "i3d_test",
size = "large",
srcs = ["nets/i3d_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
":i3d",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
"//third_party/py/six",
# "//tensorflow",
"//third_party/py/tf_slim:slim",
],
)
......@@ -379,8 +385,7 @@ py_library(
deps = [
# "//numpy",
# "//tensorflow",
# "//tensorflow/contrib/framework:framework_py",
# "//tensorflow/contrib/layers:layers_py",
"//third_party/py/tf_slim:slim",
],
)
......@@ -403,7 +408,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -414,7 +419,7 @@ py_library(
deps = [
":inception_utils",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -425,7 +430,7 @@ py_library(
deps = [
":inception_utils",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -436,7 +441,7 @@ py_library(
deps = [
":inception_utils",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -447,7 +452,7 @@ py_library(
deps = [
":inception_utils",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -457,22 +462,22 @@ py_library(
srcs_version = "PY2AND3",
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_test(
py_test( # py2and3_test
name = "inception_v1_test",
size = "large",
srcs = ["nets/inception_v1_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
":inception",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//numpy",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -484,52 +489,53 @@ py_test( # py2and3_test
srcs_version = "PY2AND3",
deps = [
":inception",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//numpy",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_test(
py_test( # py2and3_test
name = "inception_v3_test",
size = "large",
srcs = ["nets/inception_v3_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
":inception",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//numpy",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_test(
py_test( # py2and3_test
name = "inception_v4_test",
size = "large",
srcs = ["nets/inception_v4_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
":inception",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_test(
py_test( # py2and3_test
name = "inception_resnet_v2_test",
size = "large",
srcs = ["nets/inception_resnet_v2_test.py"],
python_version = "PY2",
shard_count = 3,
shard_count = 4,
srcs_version = "PY2AND3",
deps = [
":inception",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -538,7 +544,7 @@ py_library(
srcs = ["nets/lenet.py"],
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -548,8 +554,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
# "//tensorflow",
# "//tensorflow/contrib/layers:layers_py",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -562,7 +567,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -573,8 +578,7 @@ py_library(
deps = [
":mobilenet_common",
# "//tensorflow",
# "//tensorflow/contrib/layers:layers_py",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -586,7 +590,7 @@ py_library(
":mobilenet_common",
# "//numpy",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -597,18 +601,22 @@ py_test( # py2and3_test
deps = [
":mobilenet",
":mobilenet_common",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
"//third_party/py/six",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_test( # py2and3_test
name = "mobilenet_v3_test",
srcs = ["nets/mobilenet/mobilenet_v3_test.py"],
shard_count = 2,
srcs_version = "PY2AND3",
deps = [
":mobilenet",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
"//testing/pybase:parameterized",
"//third_party/py/absl/testing:absltest",
# "//tensorflow",
],
......@@ -623,46 +631,49 @@ py_library(
],
)
py_test(
py_test( # py2and3_test
name = "mobilenet_v1_test",
size = "large",
srcs = ["nets/mobilenet_v1_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
":mobilenet_v1",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//numpy",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_binary(
name = "mobilenet_v1_train",
srcs = ["nets/mobilenet_v1_train.py"],
python_version = "PY2",
python_version = "PY3",
deps = [
":dataset_factory",
":mobilenet_v1",
":preprocessing_factory",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
"//third_party/py/tf_slim:slim",
# "//tensorflow/contrib/quantize:quantize_graph",
# "//tensorflow/contrib/slim",
],
)
py_binary(
name = "mobilenet_v1_eval",
srcs = ["nets/mobilenet_v1_eval.py"],
python_version = "PY2",
python_version = "PY3",
srcs_version = "PY3",
deps = [
":dataset_factory",
":mobilenet_v1",
":preprocessing_factory",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
"//third_party/py/tf_slim:slim",
# "//tensorflow/contrib/quantize:quantize_graph",
# "//tensorflow/contrib/slim",
],
)
......@@ -672,8 +683,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
# "//tensorflow",
# "//tensorflow/contrib/framework:framework_py",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -684,36 +694,34 @@ py_library(
deps = [
":nasnet_utils",
# "//tensorflow",
# "//tensorflow/contrib/framework:framework_py",
# "//tensorflow/contrib/layers:layers_py",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
# "//tensorflow/contrib/training:training_py",
],
)
py_test(
py_test( # py2and3_test
name = "nasnet_utils_test",
size = "medium",
srcs = ["nets/nasnet/nasnet_utils_test.py"],
python_version = "PY2",
srcs_version = "PY2AND3",
deps = [
":nasnet_utils",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
],
)
py_test(
py_test( # py2and3_test
name = "nasnet_test",
size = "large",
srcs = ["nets/nasnet/nasnet_test.py"],
python_version = "PY2",
shard_count = 10,
srcs_version = "PY2AND3",
deps = [
":nasnet",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -725,23 +733,22 @@ py_library(
":nasnet",
":nasnet_utils",
# "//tensorflow",
# "//tensorflow/contrib/framework:framework_py",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
# "//tensorflow/contrib/training:training_py",
],
)
py_test(
py_test( # py2and3_test
name = "pnasnet_test",
size = "large",
srcs = ["nets/nasnet/pnasnet_test.py"],
python_version = "PY2",
shard_count = 4,
srcs_version = "PY2AND3",
deps = [
":pnasnet",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -751,7 +758,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -762,8 +769,9 @@ py_test( # py2and3_test
srcs_version = "PY2AND3",
deps = [
":overfeat",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -773,8 +781,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
# "//tensorflow",
# "//tensorflow/contrib/framework:framework_py",
# "//tensorflow/contrib/layers:layers_py",
"//third_party/py/tf_slim:slim",
],
)
......@@ -784,8 +791,9 @@ py_test( # py2and3_test
srcs_version = "PY2AND3",
deps = [
":pix2pix",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
# "//tensorflow/contrib/framework:framework_py",
"//third_party/py/tf_slim:slim",
],
)
......@@ -795,7 +803,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -806,24 +814,24 @@ py_library(
deps = [
":resnet_utils",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_test(
py_test( # py2and3_test
name = "resnet_v1_test",
size = "medium",
timeout = "long",
srcs = ["nets/resnet_v1_test.py"],
python_version = "PY2",
shard_count = 2,
srcs_version = "PY2AND3",
deps = [
":resnet_utils",
":resnet_v1",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//numpy",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -834,23 +842,23 @@ py_library(
deps = [
":resnet_utils",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
py_test(
py_test( # py2and3_test
name = "resnet_v2_test",
size = "medium",
srcs = ["nets/resnet_v2_test.py"],
python_version = "PY2",
shard_count = 2,
srcs_version = "PY2AND3",
deps = [
":resnet_utils",
":resnet_v2",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//numpy",
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -861,20 +869,20 @@ py_library(
deps = [
":i3d_utils",
# "//tensorflow",
# "//tensorflow/contrib/framework:framework_py",
# "//tensorflow/contrib/layers:layers_py",
"//third_party/py/tf_slim:slim",
],
)
py_test(
py_test( # py2and3_test
name = "s3dg_test",
size = "large",
srcs = ["nets/s3dg_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
":s3dg",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
"//third_party/py/six",
# "//tensorflow",
],
)
......@@ -885,7 +893,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -896,8 +904,9 @@ py_test( # py2and3_test
srcs_version = "PY2AND3",
deps = [
":vgg",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -906,7 +915,7 @@ py_library(
srcs = ["nets/nets_factory.py"],
deps = [
":nets",
# "//tensorflow/contrib/slim",
"//third_party/py/tf_slim:slim",
],
)
......@@ -918,6 +927,7 @@ py_test( # py2and3_test
srcs_version = "PY2AND3",
deps = [
":nets_factory",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
],
)
......@@ -929,6 +939,7 @@ pytype_strict_binary(
deps = [
":nets_factory",
":preprocessing_factory",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
"//third_party/py/absl:app",
"//third_party/py/absl/flags",
# "//tensorflow",
......@@ -946,8 +957,8 @@ py_library(
":nets_factory",
":preprocessing_factory",
# "//tensorflow",
"//third_party/py/tf_slim:slim",
# "//tensorflow/contrib/quantize:quantize_graph",
# "//tensorflow/contrib/slim",
],
)
......@@ -956,9 +967,10 @@ py_binary(
srcs = ["train_image_classifier.py"],
# WARNING: not supported in bazel; will be commented out by copybara.
# paropts = ["--compress"],
python_version = "PY2",
python_version = "PY3",
deps = [
":train_image_classifier_lib",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
],
)
......@@ -970,17 +982,18 @@ py_library(
":nets_factory",
":preprocessing_factory",
# "//tensorflow",
"//third_party/py/tf_slim:slim",
# "//tensorflow/contrib/quantize:quantize_graph",
# "//tensorflow/contrib/slim",
],
)
py_binary(
name = "eval_image_classifier",
srcs = ["eval_image_classifier.py"],
python_version = "PY2",
python_version = "PY3",
deps = [
":eval_image_classifier_lib",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
],
)
......@@ -989,8 +1002,11 @@ py_binary(
srcs = ["export_inference_graph.py"],
# WARNING: not supported in bazel; will be commented out by copybara.
# paropts = ["--compress"],
python_version = "PY2",
deps = [":export_inference_graph_lib"],
python_version = "PY3",
deps = [
":export_inference_graph_lib",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
],
)
py_library(
......@@ -1001,22 +1017,22 @@ py_library(
":nets_factory",
# "//tensorflow",
# "//tensorflow/contrib/quantize:quantize_graph",
# "//tensorflow/contrib/slim",
# "//tensorflow/python:platform",
],
)
py_test(
py_test( # py2and3_test
name = "export_inference_graph_test",
size = "medium",
srcs = ["export_inference_graph_test.py"],
python_version = "PY2",
python_version = "PY3",
srcs_version = "PY2AND3",
tags = [
"manual",
],
deps = [
":export_inference_graph_lib",
"//learning/brain/public:disable_tf2", # build_cleaner: keep; go/disable_tf2
# "//tensorflow",
# "//tensorflow/python:platform",
],
......
![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
# TensorFlow-Slim image classification model library
[TF-slim](https://github.com/tensorflow/models/tree/master/research/slim)
is a new lightweight high-level API of TensorFlow (`tensorflow.contrib.slim`)
for defining, training and evaluating complex
models. This directory contains
code for training and evaluating several widely used Convolutional Neural
Network (CNN) image classification models using TF-slim.
It contains scripts that will allow
This directory contains code for training and evaluating several
widely used Convolutional Neural Network (CNN) image classification
models using
[tf_slim](https://github.com/google-research/tf-slim/tree/master/tf_slim).
It contains scripts that allow
you to train models from scratch or fine-tune them from pre-trained network
weights. It also contains code for downloading standard image datasets,
converting them
......@@ -18,15 +12,12 @@ data reading and queueing utilities. You can easily train any model on any of
these datasets, as we demonstrate below. We've also included a
[jupyter notebook](https://github.com/tensorflow/models/blob/master/research/slim/slim_walkthrough.ipynb),
which provides working examples of how to use TF-Slim for image classification.
For developing or modifying your own models, see also the [main TF-Slim page](https://github.com/tensorflow/models/tree/master/research/slim).
For developing or modifying your own models, see also the [main TF-Slim page](https://github.com/google-research/tf-slim/tree/master/tf_slim).
## Contacts
Maintainers of TF-slim:
* Sergio Guadarrama, GitHub: [sguada](https://github.com/sguada)
* Nathan Silberman,
GitHub: [nathansilberman](https://github.com/nathansilberman)
* Sergio Guadarrama, github: [sguada](https://github.com/sguada)
## Citation
"TensorFlow-Slim image classification model library"
......@@ -52,12 +43,12 @@ prerequisite packages.
## Installing latest version of TF-slim
TF-Slim is available as `tf.contrib.slim` via TensorFlow 1.0. To test that your
TF-Slim is available as `tf_slim` package. To test that your
installation is working, execute the following command; it should run without
raising any errors.
```
python -c "import tensorflow.contrib.slim as slim; eval = slim.evaluation.evaluate_once"
python -c "import tf_slim as slim; eval = slim.evaluation.evaluate_once"
```
## Installing the TF-slim image models library
......@@ -143,7 +134,7 @@ download can take several hours, and could use up to 500GB.
## Creating a TF-Slim Dataset Descriptor.
Once the TFRecord files have been created, you can easily define a Slim
[Dataset](https://github.com/tensorflow/tensorflow/blob/r0.10/tensorflow/contrib/slim/python/slim/data/dataset.py),
[Dataset](https://github.com/google-research/tf-slim/master/tf_slim/data/dataset.py),
which stores pointers to the data file, as well as various other pieces of
metadata, such as the class labels, the train/test split, and how to parse the
TFExample protos. We have included the TF-Slim Dataset descriptors
......@@ -156,14 +147,15 @@ and
[VisualWakeWords](https://github.com/tensorflow/models/blob/master/research/slim/datasets/visualwakewords.py),
An example of how to load data using a TF-Slim dataset descriptor using a
TF-Slim
[DatasetDataProvider](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py)
[DatasetDataProvider](https://github.com/google-research/tf-slim/tree/master/tf_slim/data/dataset_data_provider.py)
is found below:
```python
import tensorflow as tf
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import flowers
slim = tf.contrib.slim
# Selects the 'validation' dataset.
dataset = flowers.get_split('validation', DATA_DIR)
......@@ -205,10 +197,10 @@ you will not need to interact with the script again.
DATA_DIR=$HOME/imagenet-data
# build the preprocessing script.
bazel build slim/download_and_preprocess_imagenet
bazel build slim/download_and_convert_imagenet
# run it
bazel-bin/slim/download_and_preprocess_imagenet "${DATA_DIR}"
bazel-bin/slim/download_and_convert_imagenet "${DATA_DIR}"
```
The final line of the output script should read:
......@@ -414,7 +406,7 @@ $ python eval_image_classifier.py \
--model_name=inception_v3
```
See the [evaluation module example](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim#evaluation-loop)
See the [evaluation module example](https://github.com/google-research/tf-slim#evaluation-loop)
for an example of how to evaluate a model at multiple checkpoints during or after the training.
# Exporting the Inference Graph
......
......@@ -94,7 +94,7 @@ import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
......
......@@ -23,13 +23,11 @@ from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
slim = contrib_slim
_FILE_PATTERN = 'cifar10_%s.tfrecord'
SPLITS_TO_SIZES = {'train': 50000, 'test': 10000}
......
......@@ -23,7 +23,7 @@ import tarfile
import zipfile
from six.moves import urllib
import tensorflow as tf
import tensorflow.compat.v1 as tf
LABELS_FILENAME = 'labels.txt'
......
......@@ -33,7 +33,7 @@ import tarfile
import numpy as np
from six.moves import cPickle
from six.moves import urllib
import tensorflow as tf
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
......
......@@ -32,7 +32,9 @@ import os
import random
import sys
import tensorflow as tf
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
......@@ -189,7 +191,8 @@ def run(dataset_dir):
dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
class_names_to_ids = dict(zip(class_names, range(len(class_names))))
class_names_to_ids = dict(
list(zip(class_names, list(range(len(class_names))))))
# Divide into train and test:
random.seed(_RANDOM_SEED)
......@@ -204,7 +207,8 @@ def run(dataset_dir):
dataset_dir)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(class_names)), class_names))
labels_to_class_names = dict(
list(zip(list(range(len(class_names))), class_names)))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
......
......@@ -32,7 +32,7 @@ import sys
import numpy as np
from six.moves import urllib
import tensorflow as tf
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
......
......@@ -77,17 +77,17 @@ from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow.compat.v1 as tf
from datasets import download_and_convert_visualwakewords_lib
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
tf.logging.set_verbosity(tf.logging.INFO)
tf.compat.v1.app.flags.DEFINE_string(
tf.app.flags.DEFINE_string(
'coco_dirname', 'coco_dataset',
'A subdirectory in visualwakewords dataset directory'
'containing the coco dataset')
FLAGS = tf.compat.v1.app.flags.FLAGS
FLAGS = tf.app.flags.FLAGS
def run(dataset_dir, small_object_area_threshold, foreground_class_of_interest):
......
......@@ -32,26 +32,27 @@ import contextlib2
import PIL.Image
import tensorflow as tf
import six
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
tf.logging.set_verbosity(tf.logging.INFO)
tf.compat.v1.app.flags.DEFINE_string(
tf.app.flags.DEFINE_string(
'coco_train_url',
'http://images.cocodataset.org/zips/train2014.zip',
'Link to zip file containing coco training data')
tf.compat.v1.app.flags.DEFINE_string(
tf.app.flags.DEFINE_string(
'coco_validation_url',
'http://images.cocodataset.org/zips/val2014.zip',
'Link to zip file containing coco validation data')
tf.compat.v1.app.flags.DEFINE_string(
tf.app.flags.DEFINE_string(
'coco_annotations_url',
'http://images.cocodataset.org/annotations/annotations_trainval2014.zip',
'Link to zip file containing coco annotation data')
FLAGS = tf.compat.v1.app.flags.FLAGS
FLAGS = tf.app.flags.FLAGS
def download_coco_dataset(dataset_dir):
......@@ -201,7 +202,7 @@ def create_tf_record_for_visualwakewords_dataset(annotations_file, image_dir,
groundtruth_data = json.load(fid)
images = groundtruth_data['images']
annotations_index = groundtruth_data['annotations']
annotations_index = {int(k): v for k, v in annotations_index.iteritems()}
annotations_index = {int(k): v for k, v in six.iteritems(annotations_index)}
# convert 'unicode' key to 'int' key after we parse the json file
for idx, image in enumerate(images):
......
......@@ -23,13 +23,11 @@ from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
slim = contrib_slim
_FILE_PATTERN = 'flowers_%s_*.tfrecord'
SPLITS_TO_SIZES = {'train': 3320, 'validation': 350}
......
......@@ -34,13 +34,11 @@ from __future__ import print_function
import os
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
slim = contrib_slim
# TODO(nsilberman): Add tfrecord file type once the script is updated.
_FILE_PATTERN = '%s-*'
......
......@@ -23,13 +23,11 @@ from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
slim = contrib_slim
_FILE_PATTERN = 'mnist_%s.tfrecord'
_SPLITS_TO_SIZES = {'train': 60000, 'test': 10000}
......
......@@ -28,14 +28,12 @@ from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
slim = contrib_slim
_FILE_PATTERN = '%s.record-*'
_SPLITS_TO_SIZES = {
......
......@@ -101,10 +101,8 @@ from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
__all__ = ['create_clones',
......
......@@ -19,14 +19,11 @@ from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
from deployment import model_deploy
import tf_slim as slim
slim = contrib_slim
from deployment import model_deploy
class DeploymentConfigTest(tf.test.TestCase):
......@@ -511,9 +508,8 @@ class DeployTest(tf.test.TestCase):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
moving_mean = contrib_framework.get_variables_by_name('moving_mean')[0]
moving_variance = contrib_framework.get_variables_by_name(
'moving_variance')[0]
moving_mean = slim.get_variables_by_name('moving_mean')[0]
moving_variance = slim.get_variables_by_name('moving_variance')[0]
initial_loss = sess.run(model.total_loss)
initial_mean, initial_variance = sess.run([moving_mean,
moving_variance])
......@@ -539,8 +535,8 @@ class DeployTest(tf.test.TestCase):
# clone function creates a fully_connected layer with a regularizer loss.
def ModelFn():
inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
reg = contrib_layers.l2_regularizer(0.001)
contrib_layers.fully_connected(inputs, 30, weights_regularizer=reg)
reg = slim.l2_regularizer(0.001)
slim.fully_connected(inputs, 30, weights_regularizer=reg)
model = model_deploy.deploy(
deploy_config, ModelFn,
......@@ -558,8 +554,8 @@ class DeployTest(tf.test.TestCase):
# clone function creates a fully_connected layer with a regularizer loss.
def ModelFn():
inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
reg = contrib_layers.l2_regularizer(0.001)
contrib_layers.fully_connected(inputs, 30, weights_regularizer=reg)
reg = slim.l2_regularizer(0.001)
slim.fully_connected(inputs, 30, weights_regularizer=reg)
# No optimizer here, it's an eval.
model = model_deploy.deploy(deploy_config, ModelFn)
......
......@@ -39,22 +39,22 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.compat.v1 as tf
from datasets import download_and_convert_cifar10
from datasets import download_and_convert_flowers
from datasets import download_and_convert_mnist
from datasets import download_and_convert_visualwakewords
FLAGS = tf.compat.v1.app.flags.FLAGS
FLAGS = tf.app.flags.FLAGS
tf.compat.v1.app.flags.DEFINE_string(
tf.app.flags.DEFINE_string(
'dataset_name',
None,
'The name of the dataset to convert, one of "flowers", "cifar10", "mnist", "visualwakewords"'
)
tf.compat.v1.app.flags.DEFINE_string(
tf.app.flags.DEFINE_string(
'dataset_dir',
None,
'The directory where the output TFRecords and temporary files are saved.')
......@@ -91,4 +91,4 @@ def main(_):
'dataset_name [%s] was not recognized.' % FLAGS.dataset_name)
if __name__ == '__main__':
tf.compat.v1.app.run()
tf.app.run()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment