Commit d4eedbb9 authored by Mark Sandler, committed by Hongkun Yu

Merged commit includes the following changes (#8077):



Internal cleanup (py2->py3) plus the following changes:

285513318  by Sergio Guadarrama:

    Adds a script for post-training quantization
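
    For reference, a minimal sketch of post-training quantization with the
    TF1-style TFLite converter (file names, tensor names, and shapes below are
    illustrative assumptions; the added script drives this through
    nets_factory/preprocessing_factory and tensorflow_datasets instead):

    import tensorflow as tf

    converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
        graph_def_file='frozen_mobilenet.pb',  # hypothetical frozen model
        input_arrays=['input'],
        output_arrays=['MobilenetV3/Predictions/Softmax'],  # hypothetical
        input_shapes={'input': [1, 224, 224, 3]})
    converter.optimizations = [tf.lite.Optimize.DEFAULT]  # weight quantization
    tflite_model = converter.convert()
    with open('mobilenet_quant.tflite', 'wb') as f:
      f.write(tflite_model)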

284222305  by Sergio Guadarrama:

    Modified the squeeze-excite operation to accommodate tensors with undefined (NoneType) H/W.
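
    A minimal sketch of the idea (an assumption of what the change looks like,
    not the repository's exact code): replacing a fixed-kernel average pool
    with a global reduce_mean lets the squeeze step build even when H/W are
    None at graph-construction time.

    import tensorflow as tf

    def squeeze_excite(input_tensor, ratio=0.25):
      # reduce_mean over H/W needs no static kernel size, unlike avg_pool2d.
      channels = input_tensor.shape.as_list()[-1]
      squeezed = tf.reduce_mean(input_tensor, axis=[1, 2], keepdims=True)
      hidden = max(1, int(channels * ratio))
      squeezed = tf.keras.layers.Dense(hidden, activation='relu')(squeezed)
      gate = tf.keras.layers.Dense(channels, activation='sigmoid')(squeezed)
      return input_tensor * gate  # broadcasts [N, 1, 1, C] over [N, H, W, C]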

282028343  by Sergio Guadarrama:

    Add MobilenetV3 and MobilenetEdgeTPU to the slim/nets_factory.
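
    Usage sketch for the factory lookup (get_network_fn is the existing slim
    factory entry point; the exact registry key for the new models is an
    assumption here):

    import tensorflow as tf
    from nets import nets_factory

    network_fn = nets_factory.get_network_fn(
        'mobilenet_v3_large', num_classes=1001, is_training=False)
    size = network_fn.default_image_size
    images = tf.compat.v1.placeholder(tf.float32, [1, size, size, 3])
    logits, end_points = network_fn(images)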

PiperOrigin-RevId: 289455329
Co-authored-by: Sergio Guadarrama <sguada@gmail.com>
parent 0e0a94a6
# Description:
# Contains files for loading, training and evaluating TF-Slim-based models.
# load("//devtools/python/blaze:python3.bzl", "py2and3_test")
load("//devtools/python/blaze:pytype.bzl", "pytype_strict_binary")
package(
default_visibility = ["//visibility:public"],
......@@ -475,11 +476,10 @@ py_test(
],
)
py_test(
py_test( # py2and3_test
name = "inception_v2_test",
size = "large",
srcs = ["nets/inception_v2_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
......@@ -590,14 +590,14 @@ py_library(
],
)
py_test(
py_test( # py2and3_test
name = "mobilenet_v2_test",
srcs = ["nets/mobilenet/mobilenet_v2_test.py"],
python_version = "PY2",
srcs_version = "PY2AND3",
deps = [
":mobilenet",
":mobilenet_common",
"//third_party/py/six",
# "//tensorflow",
# "//tensorflow/contrib/slim",
],
......@@ -755,11 +755,10 @@ py_library(
],
)
py_test(
py_test( # py2and3_test
name = "overfeat_test",
size = "medium",
srcs = ["nets/overfeat_test.py"],
python_version = "PY2",
srcs_version = "PY2AND3",
deps = [
":overfeat",
......@@ -890,11 +889,10 @@ py_library(
],
)
py_test(
py_test( # py2and3_test
name = "vgg_test",
size = "medium",
srcs = ["nets/vgg_test.py"],
python_version = "PY2",
srcs_version = "PY2AND3",
deps = [
":vgg",
......@@ -912,11 +910,10 @@ py_library(
],
)
py_test(
py_test( # py2and3_test
name = "nets_factory_test",
size = "large",
srcs = ["nets/nets_factory_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
......@@ -925,9 +922,24 @@ py_test(
],
)
pytype_strict_binary(
name = "post_training_quantization",
srcs = ["nets/post_training_quantization.py"],
python_version = "PY3",
deps = [
":nets_factory",
":preprocessing_factory",
"//third_party/py/absl:app",
"//third_party/py/absl/flags",
# "//tensorflow",
# "//tensorflow_datasets",
],
)
py_library(
name = "train_image_classifier_lib",
srcs = ["train_image_classifier.py"],
srcs_version = "PY2AND3",
deps = [
":dataset_factory",
":model_deploy",
......
......@@ -201,7 +201,7 @@ def create_tf_record_for_visualwakewords_dataset(annotations_file, image_dir,
groundtruth_data = json.load(fid)
images = groundtruth_data['images']
annotations_index = groundtruth_data['annotations']
annotations_index = {int(k): v for k, v in annotations_index.items()}
annotations_index = {int(k): v for k, v in annotations_index.iteritems()}
# convert 'unicode' key to 'int' key after we parse the json file
for idx, image in enumerate(images):
......
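
The iteritems/items hunk above is the standard py2/py3 dict-API difference;
code that must run under both usually goes through six, e.g. (standalone
sketch, not the dataset code):

import six

groundtruth = {'1': {'bbox': []}, '2': {'bbox': []}}
# six.iteritems is dict.iteritems on py2 and dict.items on py3.
index = {int(k): v for k, v in six.iteritems(groundtruth)}
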
......@@ -40,13 +40,16 @@ import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
0.0, stddev)
def alexnet_v2_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
biases_initializer=tf.constant_initializer(0.1),
biases_initializer=tf.compat.v1.constant_initializer(0.1),
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d], padding='SAME'):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
......@@ -94,7 +97,7 @@ def alexnet_v2(inputs,
or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
with tf.compat.v1.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
......@@ -110,9 +113,10 @@ def alexnet_v2(inputs,
net = slim.max_pool2d(net, [3, 3], 2, scope='pool5')
# Use conv2d instead of fully_connected layers.
with slim.arg_scope([slim.conv2d],
with slim.arg_scope(
[slim.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=tf.constant_initializer(0.1)):
biases_initializer=tf.compat.v1.constant_initializer(0.1)):
net = slim.conv2d(net, 4096, [5, 5], padding='VALID',
scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
......@@ -122,15 +126,18 @@ def alexnet_v2(inputs,
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
net = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
biases_initializer=tf.compat.v1.zeros_initializer(),
scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
......
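
The alexnet.py hunks above all follow the same mechanical rewrite in the
style of tf_upgrade_v2; a condensed, generic sketch of the renames (not
alexnet-specific code):

import tensorflow as tf

# Initializers and variable scopes moved under tf.compat.v1.
init = tf.compat.v1.truncated_normal_initializer(0.0, 0.005)
with tf.compat.v1.variable_scope('demo'):
  x = tf.random.uniform((2, 7, 7, 8))  # was tf.random_uniform
  # keep_dims= was renamed keepdims=; the upgrade also spells out keywords.
  pooled = tf.reduce_mean(input_tensor=x, axis=[1, 2], keepdims=True)
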
......@@ -32,7 +32,7 @@ class AlexnetV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
......@@ -43,7 +43,7 @@ class AlexnetV2Test(tf.test.TestCase):
height, width = 300, 400
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
......@@ -54,7 +54,7 @@ class AlexnetV2Test(tf.test.TestCase):
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False,
global_pool=True)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
......@@ -66,7 +66,7 @@ class AlexnetV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
......@@ -87,7 +87,7 @@ class AlexnetV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = None
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
......@@ -110,7 +110,7 @@ class AlexnetV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1/weights',
'alexnet_v2/conv1/biases',
......@@ -137,11 +137,11 @@ class AlexnetV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
predictions = tf.argmax(input=logits, axis=1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
......@@ -151,29 +151,29 @@ class AlexnetV2Test(tf.test.TestCase):
eval_height, eval_width = 300, 400
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
train_inputs = tf.random.uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
tf.compat.v1.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
predictions = tf.argmax(input=logits, axis=1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
......
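
The test hunks apply the same renames on the session side; a self-contained
sketch (the dense layer is a stand-in for alexnet, not the real model):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
inputs = tf.random.uniform((4, 224, 224, 3))   # was tf.random_uniform
flat = tf.reshape(inputs, [4, -1])
logits = tf.compat.v1.layers.dense(flat, 10)
predictions = tf.argmax(input=logits, axis=1)  # was tf.argmax(logits, 1)
with tf.compat.v1.Session() as sess:
  sess.run(tf.compat.v1.global_variables_initializer())
  assert sess.run(predictions).shape == (4,)
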
......@@ -23,7 +23,9 @@ from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(stddev=stddev)
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
stddev=stddev)
def cifarnet(images, num_classes=10, is_training=False,
......@@ -61,7 +63,7 @@ def cifarnet(images, num_classes=10, is_training=False,
"""
end_points = {}
with tf.variable_scope(scope, 'CifarNet', [images]):
with tf.compat.v1.variable_scope(scope, 'CifarNet', [images]):
net = slim.conv2d(images, 64, [5, 5], scope='conv1')
end_points['conv1'] = net
net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
......@@ -82,9 +84,11 @@ def cifarnet(images, num_classes=10, is_training=False,
end_points['fc4'] = net
if not num_classes:
return net, end_points
logits = slim.fully_connected(net, num_classes,
biases_initializer=tf.zeros_initializer(),
weights_initializer=trunc_normal(1/192.0),
logits = slim.fully_connected(
net,
num_classes,
biases_initializer=tf.compat.v1.zeros_initializer(),
weights_initializer=trunc_normal(1 / 192.0),
weights_regularizer=None,
activation_fn=None,
scope='logits')
......@@ -107,11 +111,12 @@ def cifarnet_arg_scope(weight_decay=0.004):
"""
with slim.arg_scope(
[slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
weights_initializer=tf.compat.v1.truncated_normal_initializer(
stddev=5e-2),
activation_fn=tf.nn.relu):
with slim.arg_scope(
[slim.fully_connected],
biases_initializer=tf.constant_initializer(0.1),
biases_initializer=tf.compat.v1.constant_initializer(0.1),
weights_initializer=trunc_normal(0.04),
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu) as sc:
......
......@@ -61,7 +61,8 @@ def cyclegan_arg_scope(instance_norm_center=True,
[layers.conv2d],
normalizer_fn=layers.instance_norm,
normalizer_params=instance_norm_params,
weights_initializer=tf.random_normal_initializer(0, weights_init_stddev),
weights_initializer=tf.compat.v1.random_normal_initializer(
0, weights_init_stddev),
weights_regularizer=weights_regularizer) as sc:
return sc
......@@ -90,8 +91,8 @@ def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose',
Raises:
ValueError: if `method` is not recognized.
"""
with tf.variable_scope('upconv'):
net_shape = tf.shape(net)
with tf.compat.v1.variable_scope('upconv'):
net_shape = tf.shape(input=net)
height = net_shape[1]
width = net_shape[2]
......@@ -101,15 +102,16 @@ def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose',
spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])
if method == 'nn_upsample_conv':
net = tf.image.resize_nearest_neighbor(
net, [stride[0] * height, stride[1] * width])
net = tf.pad(net, spatial_pad_1, pad_mode)
net = tf.image.resize(
net, [stride[0] * height, stride[1] * width],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
net = tf.pad(tensor=net, paddings=spatial_pad_1, mode=pad_mode)
net = layers.conv2d(net, num_outputs, kernel_size=[3, 3], padding='valid')
elif method == 'bilinear_upsample_conv':
net = tf.image.resize_bilinear(
net = tf.compat.v1.image.resize_bilinear(
net, [stride[0] * height, stride[1] * width],
align_corners=align_corners)
net = tf.pad(net, spatial_pad_1, pad_mode)
net = tf.pad(tensor=net, paddings=spatial_pad_1, mode=pad_mode)
net = layers.conv2d(net, num_outputs, kernel_size=[3, 3], padding='valid')
elif method == 'conv2d_transpose':
# This corrects 1 pixel offset for images with even width and height.
......@@ -126,7 +128,7 @@ def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose',
def _dynamic_or_static_shape(tensor):
shape = tf.shape(tensor)
shape = tf.shape(input=tensor)
static_shape = contrib_util.constant_value(shape)
return static_shape if static_shape is not None else shape
......@@ -204,40 +206,40 @@ def cyclegan_generator_resnet(images,
###########
# Encoder #
###########
with tf.variable_scope('input'):
with tf.compat.v1.variable_scope('input'):
# 7x7 input stage
net = tf.pad(images, spatial_pad_3, 'REFLECT')
net = tf.pad(tensor=images, paddings=spatial_pad_3, mode='REFLECT')
net = layers.conv2d(net, num_filters, kernel_size=[7, 7], padding='VALID')
end_points['encoder_0'] = net
with tf.variable_scope('encoder'):
with tf.compat.v1.variable_scope('encoder'):
with contrib_framework.arg_scope([layers.conv2d],
kernel_size=kernel_size,
stride=2,
activation_fn=tf.nn.relu,
padding='VALID'):
net = tf.pad(net, paddings, 'REFLECT')
net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
net = layers.conv2d(net, num_filters * 2)
end_points['encoder_1'] = net
net = tf.pad(net, paddings, 'REFLECT')
net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
net = layers.conv2d(net, num_filters * 4)
end_points['encoder_2'] = net
###################
# Residual Blocks #
###################
with tf.variable_scope('residual_blocks'):
with tf.compat.v1.variable_scope('residual_blocks'):
with contrib_framework.arg_scope([layers.conv2d],
kernel_size=kernel_size,
stride=1,
activation_fn=tf.nn.relu,
padding='VALID'):
for block_id in xrange(num_resnet_blocks):
with tf.variable_scope('block_{}'.format(block_id)):
res_net = tf.pad(net, paddings, 'REFLECT')
with tf.compat.v1.variable_scope('block_{}'.format(block_id)):
res_net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
res_net = layers.conv2d(res_net, num_filters * 4)
res_net = tf.pad(res_net, paddings, 'REFLECT')
res_net = tf.pad(tensor=res_net, paddings=paddings, mode='REFLECT')
res_net = layers.conv2d(res_net, num_filters * 4,
activation_fn=None)
net += res_net
......@@ -247,23 +249,23 @@ def cyclegan_generator_resnet(images,
###########
# Decoder #
###########
with tf.variable_scope('decoder'):
with tf.compat.v1.variable_scope('decoder'):
with contrib_framework.arg_scope([layers.conv2d],
kernel_size=kernel_size,
stride=1,
activation_fn=tf.nn.relu):
with tf.variable_scope('decoder1'):
with tf.compat.v1.variable_scope('decoder1'):
net = upsample_fn(net, num_outputs=num_filters * 2, stride=[2, 2])
end_points['decoder1'] = net
with tf.variable_scope('decoder2'):
with tf.compat.v1.variable_scope('decoder2'):
net = upsample_fn(net, num_outputs=num_filters, stride=[2, 2])
end_points['decoder2'] = net
with tf.variable_scope('output'):
net = tf.pad(net, spatial_pad_3, 'REFLECT')
with tf.compat.v1.variable_scope('output'):
net = tf.pad(tensor=net, paddings=spatial_pad_3, mode='REFLECT')
logits = layers.conv2d(
net,
num_outputs, [7, 7],
......
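
A compact illustration of the pad/resize renames in the cyclegan hunks above
(shapes are arbitrary):

import tensorflow as tf

net = tf.random.uniform((1, 8, 8, 3))
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
# tf.pad(net, paddings, 'REFLECT') becomes the keyword form:
net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
# tf.image.resize_nearest_neighbor becomes tf.image.resize with a method:
net = tf.image.resize(
    net, [20, 20], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
print(net.shape)  # (1, 20, 20, 3)
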
......@@ -31,7 +31,7 @@ class CycleganTest(tf.test.TestCase):
img_batch = tf.zeros([2, 32, 32, 3])
model_output, _ = cyclegan.cyclegan_generator_resnet(img_batch)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(model_output)
def _test_generator_graph_helper(self, shape):
......@@ -50,13 +50,13 @@ class CycleganTest(tf.test.TestCase):
def test_generator_unknown_batch_dim(self):
"""Check that generator can take unknown batch dimension inputs."""
img = tf.placeholder(tf.float32, shape=[None, 32, None, 3])
img = tf.compat.v1.placeholder(tf.float32, shape=[None, 32, None, 3])
output_imgs, _ = cyclegan.cyclegan_generator_resnet(img)
self.assertAllEqual([None, 32, None, 3], output_imgs.shape.as_list())
def _input_and_output_same_shape_helper(self, kernel_size):
img_batch = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
img_batch = tf.compat.v1.placeholder(tf.float32, shape=[None, 32, 32, 3])
output_img_batch, _ = cyclegan.cyclegan_generator_resnet(
img_batch, kernel_size=kernel_size)
......@@ -77,10 +77,9 @@ class CycleganTest(tf.test.TestCase):
def _error_if_height_not_multiple_of_four_helper(self, height):
self.assertRaisesRegexp(
ValueError,
'The input height must be a multiple of 4.',
ValueError, 'The input height must be a multiple of 4.',
cyclegan.cyclegan_generator_resnet,
tf.placeholder(tf.float32, shape=[None, height, 32, 3]))
tf.compat.v1.placeholder(tf.float32, shape=[None, height, 32, 3]))
def test_error_if_height_not_multiple_of_four_height29(self):
self._error_if_height_not_multiple_of_four_helper(29)
......@@ -93,10 +92,9 @@ class CycleganTest(tf.test.TestCase):
def _error_if_width_not_multiple_of_four_helper(self, width):
self.assertRaisesRegexp(
ValueError,
'The input width must be a multiple of 4.',
ValueError, 'The input width must be a multiple of 4.',
cyclegan.cyclegan_generator_resnet,
tf.placeholder(tf.float32, shape=[None, 32, width, 3]))
tf.compat.v1.placeholder(tf.float32, shape=[None, 32, width, 3]))
def test_error_if_width_not_multiple_of_four_width29(self):
self._error_if_width_not_multiple_of_four_helper(29)
......
......@@ -82,7 +82,8 @@ def discriminator(inputs,
inp_shape = inputs.get_shape().as_list()[1]
end_points = {}
with tf.variable_scope(scope, values=[inputs], reuse=reuse) as scope:
with tf.compat.v1.variable_scope(
scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d],
stride=2,
......@@ -156,7 +157,8 @@ def generator(inputs,
end_points = {}
num_layers = int(log(final_size, 2)) - 1
with tf.variable_scope(scope, values=[inputs], reuse=reuse) as scope:
with tf.compat.v1.variable_scope(
scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d_transpose],
normalizer_fn=normalizer_fn,
......
......@@ -27,21 +27,21 @@ from nets import dcgan
class DCGANTest(tf.test.TestCase):
def test_generator_run(self):
tf.set_random_seed(1234)
noise = tf.random_normal([100, 64])
tf.compat.v1.set_random_seed(1234)
noise = tf.random.normal([100, 64])
image, _ = dcgan.generator(noise)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
image.eval()
def test_generator_graph(self):
tf.set_random_seed(1234)
tf.compat.v1.set_random_seed(1234)
# Check graph construction for a number of image size/depths and batch
# sizes.
for i, batch_size in zip(xrange(3, 7), xrange(3, 8)):
tf.reset_default_graph()
tf.compat.v1.reset_default_graph()
final_size = 2 ** i
noise = tf.random_normal([batch_size, 64])
noise = tf.random.normal([batch_size, 64])
image, end_points = dcgan.generator(
noise,
depth=32,
......@@ -71,19 +71,19 @@ class DCGANTest(tf.test.TestCase):
dcgan.generator(correct_input, final_size=4)
def test_discriminator_run(self):
image = tf.random_uniform([5, 32, 32, 3], -1, 1)
image = tf.random.uniform([5, 32, 32, 3], -1, 1)
output, _ = dcgan.discriminator(image)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
output.eval()
def test_discriminator_graph(self):
# Check graph construction for a number of image size/depths and batch
# sizes.
for i, batch_size in zip(xrange(1, 6), xrange(3, 8)):
tf.reset_default_graph()
tf.compat.v1.reset_default_graph()
img_w = 2 ** i
image = tf.random_uniform([batch_size, img_w, img_w, 3], -1, 1)
image = tf.random.uniform([batch_size, img_w, img_w, 3], -1, 1)
output, end_points = dcgan.discriminator(
image,
depth=32)
......@@ -103,7 +103,8 @@ class DCGANTest(tf.test.TestCase):
with self.assertRaises(ValueError):
dcgan.discriminator(wrong_dim_img)
spatially_undefined_shape = tf.placeholder(tf.float32, [5, 32, None, 3])
spatially_undefined_shape = tf.compat.v1.placeholder(
tf.float32, [5, 32, None, 3])
with self.assertRaises(ValueError):
dcgan.discriminator(spatially_undefined_shape)
......
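
Note that the dcgan test loops above still use xrange, which does not exist
in Python 3; py2and3 code conventionally imports it from six:

from six.moves import xrange  # pylint: disable=redefined-builtin

# range on py3, lazy xrange on py2; mirrors the loop in the test above.
for i, batch_size in zip(xrange(3, 7), xrange(3, 8)):
  print(i, batch_size)
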
......@@ -31,7 +31,10 @@ from nets import i3d_utils
from nets import s3dg
slim = contrib_slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
0.0, stddev)
conv3d_spatiotemporal = i3d_utils.conv3d_spatiotemporal
......@@ -149,12 +152,12 @@ def i3d(inputs,
activation.
"""
# Final pooling and prediction
with tf.variable_scope(
with tf.compat.v1.variable_scope(
scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
with slim.arg_scope(
[slim.batch_norm, slim.dropout], is_training=is_training):
net, end_points = i3d_base(inputs, scope=scope)
with tf.variable_scope('Logits'):
with tf.compat.v1.variable_scope('Logits'):
kernel_size = i3d_utils.reduced_kernel_size_3d(net, [2, 7, 7])
net = slim.avg_pool3d(
net, kernel_size, stride=1, scope='AvgPool_0a_7x7')
......@@ -166,7 +169,7 @@ def i3d(inputs,
normalizer_fn=None,
scope='Conv2d_0c_1x1')
# Temporal average pooling.
logits = tf.reduce_mean(logits, axis=1)
logits = tf.reduce_mean(input_tensor=logits, axis=1)
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
......
......@@ -31,7 +31,7 @@ class I3DTest(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
logits, end_points = i3d.i3d(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
......@@ -45,7 +45,7 @@ class I3DTest(tf.test.TestCase):
num_frames = 64
height, width = 224, 224
inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
mixed_6c, end_points = i3d.i3d_base(inputs)
self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_6c.get_shape().as_list(),
......@@ -68,7 +68,7 @@ class I3DTest(tf.test.TestCase):
'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
out_tensor, end_points = i3d.i3d_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
......@@ -80,7 +80,7 @@ class I3DTest(tf.test.TestCase):
num_frames = 64
height, width = 224, 224
inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
_, end_points = i3d.i3d_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {'Conv2d_1a_7x7': [5, 32, 112, 112, 64],
......@@ -111,7 +111,7 @@ class I3DTest(tf.test.TestCase):
num_frames = 64
height, width = 112, 112
inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
mixed_5c, _ = i3d.i3d_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
......@@ -122,7 +122,7 @@ class I3DTest(tf.test.TestCase):
num_frames = 10
height, width = 224, 224
inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
mixed_5c, _ = i3d.i3d_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
......@@ -134,13 +134,13 @@ class I3DTest(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
eval_inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
logits, _ = i3d.i3d(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
......
......@@ -228,13 +228,13 @@ def inception_block_v1_3d(inputs,
"""
use_gating = self_gating_fn is not None
with tf.variable_scope(scope):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(scope):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = layers.conv3d(
inputs, num_outputs_0_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
if use_gating:
branch_0 = self_gating_fn(branch_0, scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = layers.conv3d(
inputs, num_outputs_1_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
branch_1 = conv3d_spatiotemporal(
......@@ -242,7 +242,7 @@ def inception_block_v1_3d(inputs,
scope='Conv2d_0b_3x3')
if use_gating:
branch_1 = self_gating_fn(branch_1, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = layers.conv3d(
inputs, num_outputs_2_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
branch_2 = conv3d_spatiotemporal(
......@@ -250,7 +250,7 @@ def inception_block_v1_3d(inputs,
scope='Conv2d_0b_3x3')
if use_gating:
branch_2 = self_gating_fn(branch_2, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = layers.max_pool3d(inputs, [3, 3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv3d(
branch_3, num_outputs_3_0b, [1, 1, 1], scope='Conv2d_0b_1x1')
......
......@@ -33,13 +33,13 @@ slim = contrib_slim
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 35x35 resnet block."""
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.compat.v1.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
......@@ -59,10 +59,10 @@ def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 17x17 resnet block."""
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.compat.v1.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
scope='Conv2d_0b_1x7')
......@@ -85,10 +85,10 @@ def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 8x8 resnet block."""
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.compat.v1.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
scope='Conv2d_0b_1x3')
......@@ -155,7 +155,7 @@ def inception_resnet_v2_base(inputs,
end_points[name] = net
return name == final_endpoint
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]):
with tf.compat.v1.variable_scope(scope, 'InceptionResnetV2', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
......@@ -188,20 +188,20 @@ def inception_resnet_v2_base(inputs,
if add_and_check_final('MaxPool_5a_3x3', net): return net, end_points
# 35 x 35 x 320
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope('Mixed_5b'):
with tf.compat.v1.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
scope='AvgPool_0a_3x3')
tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
......@@ -218,12 +218,12 @@ def inception_resnet_v2_base(inputs,
# 33 x 33 x 1088 if output_stride == 16
use_atrous = output_stride == 8
with tf.variable_scope('Mixed_6a'):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope('Mixed_6a'):
with tf.compat.v1.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 384, 3, stride=1 if use_atrous else 2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
scope='Conv2d_0b_3x3')
......@@ -231,7 +231,7 @@ def inception_resnet_v2_base(inputs,
stride=1 if use_atrous else 2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
tower_pool = slim.max_pool2d(net, 3, stride=1 if use_atrous else 2,
padding=padding,
scope='MaxPool_1a_3x3')
......@@ -251,25 +251,25 @@ def inception_resnet_v2_base(inputs,
'PreAuxlogits end_point for now.')
# 8 x 8 x 2080
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope('Mixed_7a'):
with tf.compat.v1.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(net, 3, stride=2,
padding=padding,
scope='MaxPool_1a_3x3')
......@@ -320,8 +320,8 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs],
reuse=reuse) as scope:
with tf.compat.v1.variable_scope(
scope, 'InceptionResnetV2', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
......@@ -329,7 +329,7 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
activation_fn=activation_fn)
if create_aux_logits and num_classes:
with tf.variable_scope('AuxLogits'):
with tf.compat.v1.variable_scope('AuxLogits'):
aux = end_points['PreAuxLogits']
aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID',
scope='Conv2d_1a_3x3')
......@@ -341,7 +341,7 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
scope='Logits')
end_points['AuxLogits'] = aux
with tf.variable_scope('Logits'):
with tf.compat.v1.variable_scope('Logits'):
# TODO(sguada,arnoegw): Consider adding a parameter global_pool which
# can be set to False to disable pooling here (as in resnet_*()).
kernel_size = net.get_shape()[1:3]
......@@ -349,7 +349,8 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a_8x8')
else:
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
if not num_classes:
return net, end_points
......@@ -371,7 +372,7 @@ def inception_resnet_v2_arg_scope(
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
activation_fn=tf.nn.relu,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
batch_norm_updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS,
batch_norm_scale=False):
"""Returns the scope with the default parameters for inception_resnet_v2.
......
......@@ -30,7 +30,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, endpoints = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue('AuxLogits' in endpoints)
auxlogits = endpoints['AuxLogits']
......@@ -47,7 +47,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, endpoints = inception.inception_resnet_v2(inputs, num_classes,
create_aux_logits=False)
self.assertTrue('AuxLogits' not in endpoints)
......@@ -60,7 +60,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299
num_classes = None
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
net, endpoints = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue('AuxLogits' not in endpoints)
self.assertTrue('Logits' not in endpoints)
......@@ -73,7 +73,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue('Logits' in end_points)
logits = end_points['Logits']
......@@ -91,7 +91,7 @@ class InceptionTest(tf.test.TestCase):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = inception.inception_resnet_v2_base(inputs)
self.assertTrue(net.op.name.startswith('InceptionResnetV2/Conv2d_7b_1x1'))
self.assertListEqual(net.get_shape().as_list(),
......@@ -111,7 +111,7 @@ class InceptionTest(tf.test.TestCase):
'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint=endpoint)
if endpoint != 'PreAuxLogits':
......@@ -123,7 +123,7 @@ class InceptionTest(tf.test.TestCase):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint='PreAuxLogits')
endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32],
......@@ -149,7 +149,7 @@ class InceptionTest(tf.test.TestCase):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint='PreAuxLogits', align_feature_maps=True)
endpoints_shapes = {'Conv2d_1a_3x3': [5, 150, 150, 32],
......@@ -175,7 +175,7 @@ class InceptionTest(tf.test.TestCase):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint='PreAuxLogits', output_stride=8)
endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32],
......@@ -202,15 +202,17 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
# Force all Variables to reside on the device.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
with tf.compat.v1.variable_scope('on_cpu'), tf.device('/cpu:0'):
inception.inception_resnet_v2(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
with tf.compat.v1.variable_scope('on_gpu'), tf.device('/gpu:0'):
inception.inception_resnet_v2(inputs, num_classes)
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
for v in tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
for v in tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testHalfSizeImages(self):
......@@ -218,7 +220,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 150, 150
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
......@@ -232,7 +234,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 330, 400
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
......@@ -246,15 +248,15 @@ class InceptionTest(tf.test.TestCase):
height, width = 330, 400
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (batch_size, None, None, 3))
inputs = tf.compat.v1.placeholder(tf.float32, (batch_size, None, None, 3))
logits, end_points = inception.inception_resnet_v2(
inputs, num_classes, create_aux_logits=False)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_7b_1x1']
images = tf.random_uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.compat.v1.global_variables_initializer())
logits_out, pre_pool_out = sess.run([logits, pre_pool],
{inputs: images.eval()})
self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))
......@@ -265,13 +267,13 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
......@@ -280,12 +282,12 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = inception.inception_resnet_v2(eval_inputs,
num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
sess.run(tf.global_variables_initializer())
predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
......@@ -295,39 +297,40 @@ class InceptionTest(tf.test.TestCase):
height, width = 150, 150
num_classes = 1000
with self.test_session() as sess:
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
inception.inception_resnet_v2(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_resnet_v2(eval_inputs,
num_classes,
is_training=False,
reuse=True)
predictions = tf.argmax(logits, 1)
sess.run(tf.global_variables_initializer())
predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testNoBatchNormScaleByDefault(self):
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with contrib_slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
inception.inception_resnet_v2(inputs, num_classes, is_training=False)
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with contrib_slim.arg_scope(
inception.inception_resnet_v2_arg_scope(batch_norm_scale=True)):
inception.inception_resnet_v2(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
v.op.name
for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
......
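
The BatchNorm assertions above rely on tf.compat.v1.global_variables
accepting a scope regex; a minimal standalone demonstration:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
with tf.compat.v1.variable_scope('net/BatchNorm'):
  tf.compat.v1.get_variable('gamma', shape=[8])
matches = tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$')
print([v.op.name for v in matches])  # ['net/BatchNorm/gamma']
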
......@@ -30,12 +30,13 @@ from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
def inception_arg_scope(weight_decay=0.00004,
def inception_arg_scope(
weight_decay=0.00004,
use_batch_norm=True,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
activation_fn=tf.nn.relu,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
batch_norm_updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS,
batch_norm_scale=False):
"""Defines the default arg scope for inception models.
......
......@@ -24,7 +24,10 @@ from tensorflow.contrib import slim as contrib_slim
from nets import inception_utils
slim = contrib_slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
0.0, stddev)
def inception_v1_base(inputs,
......@@ -59,7 +62,7 @@ def inception_v1_base(inputs,
ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with tf.compat.v1.variable_scope(scope, 'InceptionV1', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=trunc_normal(0.01)):
......@@ -94,16 +97,16 @@ def inception_v1_base(inputs,
return net, end_points
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -112,16 +115,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -135,16 +138,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -153,16 +156,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -171,16 +174,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -189,16 +192,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -207,16 +210,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4f'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -230,16 +233,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -248,16 +251,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -313,14 +316,16 @@ def inception_v1(inputs,
activation.
"""
# Final pooling and prediction
with tf.variable_scope(scope, 'InceptionV1', [inputs], reuse=reuse) as scope:
with tf.compat.v1.variable_scope(
scope, 'InceptionV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v1_base(inputs, scope=scope)
with tf.variable_scope('Logits'):
with tf.compat.v1.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
......
......@@ -34,7 +34,7 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'InceptionV1/Logits/SpatialSqueeze'))
......@@ -49,7 +49,7 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 224, 224
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
......@@ -60,7 +60,7 @@ class InceptionV1Test(tf.test.TestCase):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
mixed_6c, end_points = inception.inception_v1_base(inputs)
self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_6c.get_shape().as_list(),
......@@ -82,7 +82,7 @@ class InceptionV1Test(tf.test.TestCase):
'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
......@@ -93,7 +93,7 @@ class InceptionV1Test(tf.test.TestCase):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v1_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {
......@@ -125,7 +125,7 @@ class InceptionV1Test(tf.test.TestCase):
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
......@@ -136,7 +136,7 @@ class InceptionV1Test(tf.test.TestCase):
batch_size = 5
height, width = 112, 112
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
mixed_5c, _ = inception.inception_v1_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
......@@ -147,7 +147,7 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 28, 28
channels = 192
inputs = tf.random_uniform((batch_size, height, width, channels))
inputs = tf.random.uniform((batch_size, height, width, channels))
_, end_points = inception.inception_v1_base(
inputs, include_root_block=False)
endpoints_shapes = {
......@@ -172,31 +172,33 @@ class InceptionV1Test(tf.test.TestCase):
expected_shape)
def testUnknownImageShape(self):
tf.reset_default_graph()
tf.compat.v1.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
inputs = tf.compat.v1.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
tf.compat.v1.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph()
tf.compat.v1.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
inputs = tf.compat.v1.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes,
global_pool=True)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
......@@ -204,7 +206,7 @@ class InceptionV1Test(tf.test.TestCase):
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
tf.compat.v1.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
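# Hedged aside, not from the diff: the 8 x 10 shape asserted above follows
# from InceptionV1's total stride of 32 with SAME padding applied to a
# 250 x 300 input:
import math
assert (math.ceil(250 / 32.0), math.ceil(300 / 32.0)) == (8, 10)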
......@@ -213,15 +215,15 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
images = tf.random.uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
......@@ -230,13 +232,13 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
......@@ -246,50 +248,51 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
inception.inception_v1(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
predictions = tf.argmax(logits, 1)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random_uniform([1, 224, 224, 3])
images = tf.random.uniform([1, 224, 224, 3])
logits, _ = inception.inception_v1(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.compat.v1.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testNoBatchNormScaleByDefault(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1(inputs, num_classes, is_training=False)
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_v1_arg_scope(batch_norm_scale=True)):
inception.inception_v1(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
v.op.name
for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
......
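# Illustrative sketch, not part of the original file: the test hunks above
# repeat the same substitutions -- tf.placeholder, tf.reset_default_graph and
# tf.global_variables_initializer gain a tf.compat.v1 prefix, tf.random_uniform
# becomes tf.random.uniform, and tf.argmax is called with keyword arguments.
# A self-contained graph-mode version of the eval pattern (stand-in model):
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # assumed: TF2 runtime, TF1-style graph

def eval_sketch():
  inputs = tf.compat.v1.placeholder(tf.float32, shape=(2, 224, 224, 3))
  logits = tf.reduce_mean(input_tensor=inputs, axis=[1, 2])  # stand-in model
  predictions = tf.argmax(input=logits, axis=1)
  with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    images = tf.random.uniform((2, 224, 224, 3))
    return sess.run(predictions, {inputs: sess.run(images)})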
......@@ -24,7 +24,10 @@ from tensorflow.contrib import slim as contrib_slim
from nets import inception_utils
slim = contrib_slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
0.0, stddev)
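# Hedged usage sketch, not from the diff: the initializer built by
# trunc_normal draws from a normal with mean 0.0 and the given stddev,
# re-sampling values that fall more than two standard deviations out.
def _trunc_normal_demo():
  init = trunc_normal(0.09)         # stddev used by several branches below
  return init([1, 1, 64, 64])       # materialize one sample tensor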
def inception_v2_base(inputs,
......@@ -92,7 +95,7 @@ def inception_v2_base(inputs,
)
concat_dim = 3 if data_format == 'NHWC' else 1
with tf.variable_scope(scope, 'InceptionV2', [inputs]):
with tf.compat.v1.variable_scope(scope, 'InceptionV2', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
......@@ -167,17 +170,17 @@ def inception_v2_base(inputs,
# 28 x 28 x 192
# Inception module.
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
......@@ -186,7 +189,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(32), [1, 1],
......@@ -198,17 +201,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 28 x 28 x 256
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
......@@ -217,7 +220,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(64), [1, 1],
......@@ -229,15 +232,15 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 28 x 28 x 320
end_point = 'Mixed_4a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
......@@ -246,7 +249,7 @@ def inception_v2_base(inputs,
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2])
......@@ -254,17 +257,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
......@@ -273,7 +276,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
......@@ -285,17 +288,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(128), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
......@@ -304,7 +307,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
......@@ -316,17 +319,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
......@@ -335,7 +338,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(96), [1, 1],
......@@ -347,17 +350,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
......@@ -366,7 +369,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(96), [1, 1],
......@@ -378,15 +381,15 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_5a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
......@@ -395,7 +398,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
scope='MaxPool_1a_3x3')
net = tf.concat(
......@@ -404,17 +407,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
......@@ -423,7 +426,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
......@@ -435,17 +438,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
......@@ -454,7 +457,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
......@@ -525,16 +528,18 @@ def inception_v2(inputs,
raise ValueError('depth_multiplier is not greater than zero.')
# Final pooling and prediction
with tf.variable_scope(scope, 'InceptionV2', [inputs], reuse=reuse) as scope:
with tf.compat.v1.variable_scope(
scope, 'InceptionV2', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v2_base(
inputs, scope=scope, min_depth=min_depth,
depth_multiplier=depth_multiplier)
with tf.variable_scope('Logits'):
with tf.compat.v1.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
......
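# Illustrative sketch, not part of the original file: every branch width in
# inception_v2 passes through a `depth` helper equivalent to the function
# below; a depth_multiplier <= 0 is rejected up front, and min_depth floors
# the channel count when the multiplier shrinks it.
def depth_sketch(d, depth_multiplier=1.0, min_depth=16):
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  return max(int(d * depth_multiplier), min_depth)

assert depth_sketch(64, depth_multiplier=0.5) == 32
assert depth_sketch(16, depth_multiplier=0.5) == 16  # floored by min_depth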
......@@ -34,7 +34,7 @@ class InceptionV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'InceptionV2/Logits/SpatialSqueeze'))
......@@ -49,7 +49,7 @@ class InceptionV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV2/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
......@@ -60,7 +60,7 @@ class InceptionV2Test(tf.test.TestCase):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
mixed_5c, end_points = inception.inception_v2_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV2/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
......@@ -70,7 +70,7 @@ class InceptionV2Test(tf.test.TestCase):
'Mixed_5b', 'Mixed_5c', 'Conv2d_1a_7x7',
'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
self.assertItemsEqual(list(end_points.keys()), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
......@@ -81,18 +81,18 @@ class InceptionV2Test(tf.test.TestCase):
'Mixed_5a', 'Mixed_5b', 'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v2_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV2/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points.keys())
self.assertItemsEqual(endpoints[:index + 1], list(end_points.keys()))
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {'Mixed_3b': [batch_size, 28, 28, 256],
......@@ -110,7 +110,8 @@ class InceptionV2Test(tf.test.TestCase):
'Conv2d_2b_1x1': [batch_size, 56, 56, 64],
'Conv2d_2c_3x3': [batch_size, 56, 56, 192],
'MaxPool_3a_3x3': [batch_size, 28, 28, 192]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
self.assertItemsEqual(
list(endpoints_shapes.keys()), list(end_points.keys()))
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
......@@ -120,7 +121,7 @@ class InceptionV2Test(tf.test.TestCase):
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v2_arg_scope()):
inception.inception_v2_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
......@@ -132,7 +133,7 @@ class InceptionV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
......@@ -152,7 +153,7 @@ class InceptionV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
......@@ -172,7 +173,7 @@ class InceptionV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception.inception_v2(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
......@@ -182,7 +183,7 @@ class InceptionV2Test(tf.test.TestCase):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2_base(inputs)
endpoint_keys = [
......@@ -205,7 +206,7 @@ class InceptionV2Test(tf.test.TestCase):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2_base(inputs)
endpoint_keys = [
......@@ -213,7 +214,7 @@ class InceptionV2Test(tf.test.TestCase):
if key.startswith('Mixed') or key.startswith('Conv')
]
inputs_in_nchw = tf.random_uniform((batch_size, 3, height, width))
inputs_in_nchw = tf.random.uniform((batch_size, 3, height, width))
_, end_points_with_replacement = inception.inception_v2_base(
inputs_in_nchw, use_separable_conv=False, data_format='NCHW')
......@@ -221,7 +222,7 @@ class InceptionV2Test(tf.test.TestCase):
# shape from the original shape with the 'NHWC' layout.
for key in endpoint_keys:
transposed_original_shape = tf.transpose(
end_points[key], [0, 3, 1, 2]).get_shape().as_list()
a=end_points[key], perm=[0, 3, 1, 2]).get_shape().as_list()
self.assertTrue(key in end_points_with_replacement)
new_shape = end_points_with_replacement[key].get_shape().as_list()
self.assertListEqual(transposed_original_shape, new_shape)
......@@ -230,7 +231,7 @@ class InceptionV2Test(tf.test.TestCase):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
# 'NCWH' data format is not supported.
with self.assertRaises(ValueError):
......@@ -245,7 +246,7 @@ class InceptionV2Test(tf.test.TestCase):
height, width = 112, 112
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
......@@ -259,7 +260,7 @@ class InceptionV2Test(tf.test.TestCase):
height, width = 28, 28
channels = 192
inputs = tf.random_uniform((batch_size, height, width, channels))
inputs = tf.random.uniform((batch_size, height, width, channels))
_, end_points = inception.inception_v2_base(
inputs, include_root_block=False)
endpoints_shapes = {
......@@ -274,7 +275,8 @@ class InceptionV2Test(tf.test.TestCase):
'Mixed_5b': [batch_size, 7, 7, 1024],
'Mixed_5c': [batch_size, 7, 7, 1024]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
self.assertItemsEqual(
list(endpoints_shapes.keys()), list(end_points.keys()))
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
......@@ -282,31 +284,33 @@ class InceptionV2Test(tf.test.TestCase):
expected_shape)
def testUnknownImageShape(self):
tf.reset_default_graph()
tf.compat.v1.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
inputs = tf.compat.v1.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
tf.compat.v1.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph()
tf.compat.v1.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
inputs = tf.compat.v1.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v2(inputs, num_classes,
global_pool=True)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
......@@ -314,7 +318,7 @@ class InceptionV2Test(tf.test.TestCase):
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
tf.compat.v1.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
......@@ -323,15 +327,15 @@ class InceptionV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
images = tf.random.uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
......@@ -340,13 +344,13 @@ class InceptionV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v2(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
......@@ -356,50 +360,51 @@ class InceptionV2Test(tf.test.TestCase):
height, width = 150, 150
num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
inception.inception_v2(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)
predictions = tf.argmax(logits, 1)
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random_uniform([1, 224, 224, 3])
images = tf.random.uniform([1, 224, 224, 3])
logits, _ = inception.inception_v2(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.compat.v1.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testNoBatchNormScaleByDefault(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v2_arg_scope()):
inception.inception_v2(inputs, num_classes, is_training=False)
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_v2_arg_scope(batch_norm_scale=True)):
inception.inception_v2(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
v.op.name
for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
......
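# Illustrative sketch, not part of the original file: two py2->py3 patterns
# recur in the test hunks above. dict.keys() returns a view in Python 3, so
# assertItemsEqual arguments are wrapped in list(...); and tf.transpose is
# called with its keyword names, where perm=[0, 3, 1, 2] maps NHWC to NCHW.
import tensorflow as tf

end_points = {'Mixed_3b': 0, 'Mixed_3c': 1}
assert sorted(list(end_points.keys())) == ['Mixed_3b', 'Mixed_3c']

nhwc = tf.zeros([5, 224, 224, 3])
nchw = tf.transpose(a=nhwc, perm=[0, 3, 1, 2])
assert nchw.shape.as_list() == [5, 3, 224, 224]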
......@@ -24,7 +24,10 @@ from tensorflow.contrib import slim as contrib_slim
from nets import inception_utils
slim = contrib_slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
0.0, stddev)
def inception_v3_base(inputs,
......@@ -97,7 +100,7 @@ def inception_v3_base(inputs,
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with tf.variable_scope(scope, 'InceptionV3', [inputs]):
with tf.compat.v1.variable_scope(scope, 'InceptionV3', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='VALID'):
# 299 x 299 x 3
......@@ -142,20 +145,20 @@ def inception_v3_base(inputs,
stride=1, padding='SAME'):
# mixed: 35 x 35 x 256.
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
scope='Conv2d_0b_1x1')
......@@ -165,21 +168,21 @@ def inception_v3_base(inputs,
# mixed_1: 35 x 35 x 288.
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv_1_0c_5x5')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1],
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
scope='Conv2d_0b_1x1')
......@@ -189,20 +192,20 @@ def inception_v3_base(inputs,
# mixed_2: 35 x 35 x 288.
end_point = 'Mixed_5d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
scope='Conv2d_0b_1x1')
......@@ -212,17 +215,17 @@ def inception_v3_base(inputs,
# mixed_3: 17 x 17 x 768.
end_point = 'Mixed_6a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
......@@ -231,16 +234,16 @@ def inception_v3_base(inputs,
# mixed4: 17 x 17 x 768.
end_point = 'Mixed_6b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
scope='Conv2d_0b_7x1')
......@@ -250,7 +253,7 @@ def inception_v3_base(inputs,
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
......@@ -260,16 +263,16 @@ def inception_v3_base(inputs,
# mixed_5: 17 x 17 x 768.
end_point = 'Mixed_6c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0b_7x1')
......@@ -279,7 +282,7 @@ def inception_v3_base(inputs,
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
......@@ -288,16 +291,16 @@ def inception_v3_base(inputs,
if end_point == final_endpoint: return net, end_points
# mixed_6: 17 x 17 x 768.
end_point = 'Mixed_6d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0b_7x1')
......@@ -307,7 +310,7 @@ def inception_v3_base(inputs,
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
......@@ -317,16 +320,16 @@ def inception_v3_base(inputs,
# mixed_7: 17 x 17 x 768.
end_point = 'Mixed_6e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
scope='Conv2d_0b_7x1')
......@@ -336,7 +339,7 @@ def inception_v3_base(inputs,
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
......@@ -346,12 +349,12 @@ def inception_v3_base(inputs,
# mixed_8: 8 x 8 x 1280.
end_point = 'Mixed_7a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
scope='Conv2d_0b_1x7')
......@@ -359,7 +362,7 @@ def inception_v3_base(inputs,
scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
......@@ -367,22 +370,22 @@ def inception_v3_base(inputs,
if end_point == final_endpoint: return net, end_points
# mixed_9: 8 x 8 x 2048.
end_point = 'Mixed_7b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
......@@ -392,22 +395,22 @@ def inception_v3_base(inputs,
# mixed_10: 8 x 8 x 2048.
end_point = 'Mixed_7c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
with tf.variable_scope('Branch_2'):
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
with tf.variable_scope('Branch_3'):
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
......@@ -483,7 +486,8 @@ def inception_v3(inputs,
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with tf.variable_scope(scope, 'InceptionV3', [inputs], reuse=reuse) as scope:
with tf.compat.v1.variable_scope(
scope, 'InceptionV3', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v3_base(
......@@ -495,7 +499,7 @@ def inception_v3(inputs,
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
aux_logits = end_points['Mixed_6e']
with tf.variable_scope('AuxLogits'):
with tf.compat.v1.variable_scope('AuxLogits'):
aux_logits = slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID',
scope='AvgPool_1a_5x5')
......@@ -518,10 +522,11 @@ def inception_v3(inputs,
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with tf.variable_scope('Logits'):
with tf.compat.v1.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='GlobalPool')
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='GlobalPool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
......
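# Illustrative sketch, not part of the original file: all three
# inception_v*_base functions rely on the early-exit idiom seen throughout
# the hunks above; each block registers its endpoint and returns as soon as
# final_endpoint is reached. Block bodies are elided here.
def base_sketch(inputs, final_endpoint='Mixed_7c'):
  end_points = {}
  net = inputs
  for end_point in ('Mixed_7a', 'Mixed_7b', 'Mixed_7c'):
    # ... the block's convolutions would update `net` here ...
    end_points[end_point] = net
    if end_point == final_endpoint:
      return net, end_points
  raise ValueError('Unknown final endpoint %s' % final_endpoint)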