"...git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "d74483c47a95995c5e7943462aa6cde74cff7fb7"
Commit d4eedbb9 authored by Mark Sandler, committed by Hongkun Yu

Merged commit includes the following changes: (#8077)



Internal cleanup (py2->py3) plus the following changes:

285513318  by Sergio Guadarrama:

    Adds a script for post-training quantization

284222305  by Sergio Guadarrama:

    Modified squeeze-excite operation to accommodate tensors of undefined (NoneType) H/W (see the sketch after the commit metadata below).

282028343  by Sergio Guadarrama:

    Add MobilenetV3 and MobilenetEdgeTPU to the slim/nets_factory.

PiperOrigin-RevId: 289455329
Co-authored-by: Sergio Guadarrama <sguada@gmail.com>
parent 0e0a94a6
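Change 284222305 itself is not part of the excerpt below, but the usual way to make a squeeze-excite op tolerate undefined height/width is to replace a fixed-size average pool with a reduce_mean over the spatial axes. A minimal sketch under that assumption (the helper name is illustrative, not the code in this commit):

import tensorflow as tf
tf.compat.v1.disable_eager_execution()  # TF1-style graphs, as in slim

def spatial_global_pool(x):
  # reduce_mean needs no static H/W, unlike an avg_pool with an explicit
  # kernel size, so tensors shaped [N, None, None, C] are fine.
  return tf.reduce_mean(x, axis=[1, 2], keepdims=True)

x = tf.compat.v1.placeholder(tf.float32, [None, None, None, 64])
se = spatial_global_pool(x)  # static shape: [None, 1, 1, 64]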
slim/BUILD

 # Description:
 # Contains files for loading, training and evaluating TF-Slim-based models.
 # load("//devtools/python/blaze:python3.bzl", "py2and3_test")
+load("//devtools/python/blaze:pytype.bzl", "pytype_strict_binary")

 package(
     default_visibility = ["//visibility:public"],

@@ -475,11 +476,10 @@ py_test(
     ],
 )

-py_test(
+py_test(  # py2and3_test
     name = "inception_v2_test",
     size = "large",
     srcs = ["nets/inception_v2_test.py"],
-    python_version = "PY2",
     shard_count = 3,
     srcs_version = "PY2AND3",
     deps = [

@@ -590,14 +590,14 @@ py_library(
     ],
 )

-py_test(
+py_test(  # py2and3_test
     name = "mobilenet_v2_test",
     srcs = ["nets/mobilenet/mobilenet_v2_test.py"],
-    python_version = "PY2",
     srcs_version = "PY2AND3",
     deps = [
         ":mobilenet",
         ":mobilenet_common",
+        "//third_party/py/six",
         # "//tensorflow",
         # "//tensorflow/contrib/slim",
     ],

@@ -755,11 +755,10 @@ py_library(
     ],
 )

-py_test(
+py_test(  # py2and3_test
     name = "overfeat_test",
     size = "medium",
     srcs = ["nets/overfeat_test.py"],
-    python_version = "PY2",
     srcs_version = "PY2AND3",
     deps = [
         ":overfeat",

@@ -890,11 +889,10 @@ py_library(
     ],
 )

-py_test(
+py_test(  # py2and3_test
     name = "vgg_test",
     size = "medium",
     srcs = ["nets/vgg_test.py"],
-    python_version = "PY2",
     srcs_version = "PY2AND3",
     deps = [
         ":vgg",

@@ -912,11 +910,10 @@ py_library(
     ],
 )

-py_test(
+py_test(  # py2and3_test
     name = "nets_factory_test",
     size = "large",
     srcs = ["nets/nets_factory_test.py"],
-    python_version = "PY2",
     shard_count = 3,
     srcs_version = "PY2AND3",
     deps = [

@@ -925,9 +922,24 @@ py_test(
     ],
 )

+pytype_strict_binary(
+    name = "post_training_quantization",
+    srcs = ["nets/post_training_quantization.py"],
+    python_version = "PY3",
+    deps = [
+        ":nets_factory",
+        ":preprocessing_factory",
+        "//third_party/py/absl:app",
+        "//third_party/py/absl/flags",
+        # "//tensorflow",
+        # "//tensorflow_datasets",
+    ],
+)
 py_library(
     name = "train_image_classifier_lib",
     srcs = ["train_image_classifier.py"],
+    srcs_version = "PY2AND3",
     deps = [
         ":dataset_factory",
         ":model_deploy",
...
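The new pytype_strict_binary above only wires nets/post_training_quantization.py into the build; the script body is not part of this excerpt. As a rough idea of what post-training quantization with a representative dataset looks like (paths and the calibration data are illustrative, and this sketch uses the TF 2.x TFLite converter rather than the script's actual flags):

import numpy as np
import tensorflow as tf

def representative_dataset():
  # A real script would feed preprocessed validation images, e.g. from
  # tensorflow_datasets; random data is only a placeholder here.
  for _ in range(100):
    yield [np.random.rand(1, 224, 224, 3).astype(np.float32)]

converter = tf.lite.TFLiteConverter.from_saved_model('/tmp/saved_model')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
with open('/tmp/model_quant.tflite', 'wb') as f:
  f.write(converter.convert())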
@@ -201,7 +201,7 @@ def create_tf_record_for_visualwakewords_dataset(annotations_file, image_dir,
    groundtruth_data = json.load(fid)
    images = groundtruth_data['images']
    annotations_index = groundtruth_data['annotations']
-    annotations_index = {int(k): v for k, v in annotations_index.iteritems()}
+    annotations_index = {int(k): v for k, v in annotations_index.items()}
    # convert 'unicode' key to 'int' key after we parse the json file
    for idx, image in enumerate(images):
...
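The iteritems() change above is the core py2->py3 fix in this file: dict.iteritems() no longer exists in Python 3, while items() works under both (it merely materializes a list under py2). When py2 memory behavior matters, six.iteritems is the usual shim, which is presumably why //third_party/py/six shows up in the BUILD changes:

import six

annotations = {'1': ['a'], '2': ['b']}
index = {int(k): v for k, v in annotations.items()}         # py2 and py3
index = {int(k): v for k, v in six.iteritems(annotations)}  # same, lazy on py2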
slim/nets/alexnet.py

@@ -40,13 +40,16 @@ import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim

slim = contrib_slim

-trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
+# pylint: disable=g-long-lambda
+trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
+    0.0, stddev)


def alexnet_v2_arg_scope(weight_decay=0.0005):
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
-                      biases_initializer=tf.constant_initializer(0.1),
+                      biases_initializer=tf.compat.v1.constant_initializer(0.1),
                      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope([slim.conv2d], padding='SAME'):
      with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
@@ -94,7 +97,7 @@ def alexnet_v2(inputs,
      or None).
    end_points: a dict of tensors with intermediate activations.
  """
-  with tf.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
+  with tf.compat.v1.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
@@ -110,9 +113,10 @@ def alexnet_v2(inputs,
      net = slim.max_pool2d(net, [3, 3], 2, scope='pool5')

      # Use conv2d instead of fully_connected layers.
-      with slim.arg_scope([slim.conv2d],
-                          weights_initializer=trunc_normal(0.005),
-                          biases_initializer=tf.constant_initializer(0.1)):
+      with slim.arg_scope(
+          [slim.conv2d],
+          weights_initializer=trunc_normal(0.005),
+          biases_initializer=tf.compat.v1.constant_initializer(0.1)):
        net = slim.conv2d(net, 4096, [5, 5], padding='VALID',
                          scope='fc6')
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
@@ -122,16 +126,19 @@ def alexnet_v2(inputs,
      end_points = slim.utils.convert_collection_to_dict(
          end_points_collection)
      if global_pool:
-        net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
+        net = tf.reduce_mean(
+            input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
        end_points['global_pool'] = net
      if num_classes:
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout7')
-        net = slim.conv2d(net, num_classes, [1, 1],
-                          activation_fn=None,
-                          normalizer_fn=None,
-                          biases_initializer=tf.zeros_initializer(),
-                          scope='fc8')
+        net = slim.conv2d(
+            net,
+            num_classes, [1, 1],
+            activation_fn=None,
+            normalizer_fn=None,
+            biases_initializer=tf.compat.v1.zeros_initializer(),
+            scope='fc8')
        if spatial_squeeze:
          net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
...
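Nearly every alexnet.py edit is the same mechanical TF1->TF2 compat move: symbols dropped from the top-level tf namespace (variable_scope, constant_initializer, truncated_normal_initializer) migrate under tf.compat.v1, and deprecated keyword spellings (keep_dims) become their TF2 names (keepdims). A minimal before/after sketch:

import tensorflow as tf
tf.compat.v1.disable_eager_execution()  # graph mode, as in this codebase

# TF1-only spelling:
#   with tf.variable_scope('demo'):
#     pooled = tf.reduce_mean(net, [1, 2], keep_dims=True)
with tf.compat.v1.variable_scope('demo'):
  net = tf.compat.v1.placeholder(tf.float32, [None, 7, 7, 64])
  pooled = tf.reduce_mean(net, axis=[1, 2], keepdims=True)  # [None, 1, 1, 64]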
slim/nets/alexnet_test.py

@@ -32,7 +32,7 @@ class AlexnetV2Test(tf.test.TestCase):
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
      logits, _ = alexnet.alexnet_v2(inputs, num_classes)
      self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
      self.assertListEqual(logits.get_shape().as_list(),
@@ -43,7 +43,7 @@ class AlexnetV2Test(tf.test.TestCase):
    height, width = 300, 400
    num_classes = 1000
    with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
      logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
      self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
      self.assertListEqual(logits.get_shape().as_list(),
@@ -54,7 +54,7 @@ class AlexnetV2Test(tf.test.TestCase):
    height, width = 256, 256
    num_classes = 1000
    with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
      logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False,
                                     global_pool=True)
      self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
@@ -66,7 +66,7 @@ class AlexnetV2Test(tf.test.TestCase):
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
      _, end_points = alexnet.alexnet_v2(inputs, num_classes)
      expected_names = ['alexnet_v2/conv1',
                        'alexnet_v2/pool1',
@@ -87,7 +87,7 @@ class AlexnetV2Test(tf.test.TestCase):
    height, width = 224, 224
    num_classes = None
    with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
      net, end_points = alexnet.alexnet_v2(inputs, num_classes)
      expected_names = ['alexnet_v2/conv1',
                        'alexnet_v2/pool1',
@@ -110,7 +110,7 @@ class AlexnetV2Test(tf.test.TestCase):
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
      alexnet.alexnet_v2(inputs, num_classes)
      expected_names = ['alexnet_v2/conv1/weights',
                        'alexnet_v2/conv1/biases',
@@ -137,11 +137,11 @@ class AlexnetV2Test(tf.test.TestCase):
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
-      eval_inputs = tf.random_uniform((batch_size, height, width, 3))
+      eval_inputs = tf.random.uniform((batch_size, height, width, 3))
      logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
-      predictions = tf.argmax(logits, 1)
+      predictions = tf.argmax(input=logits, axis=1)
      self.assertListEqual(predictions.get_shape().as_list(), [batch_size])

  def testTrainEvalWithReuse(self):
@@ -151,29 +151,29 @@ class AlexnetV2Test(tf.test.TestCase):
    eval_height, eval_width = 300, 400
    num_classes = 1000
    with self.test_session():
-      train_inputs = tf.random_uniform(
+      train_inputs = tf.random.uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = alexnet.alexnet_v2(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
-      tf.get_variable_scope().reuse_variables()
+      tf.compat.v1.get_variable_scope().reuse_variables()
-      eval_inputs = tf.random_uniform(
+      eval_inputs = tf.random.uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
                                     spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 4, 7, num_classes])
-      logits = tf.reduce_mean(logits, [1, 2])
+      logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
-      predictions = tf.argmax(logits, 1)
+      predictions = tf.argmax(input=logits, axis=1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])

  def testForward(self):
    batch_size = 1
    height, width = 224, 224
    with self.test_session() as sess:
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
      logits, _ = alexnet.alexnet_v2(inputs)
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
      output = sess.run(logits)
      self.assertTrue(output.any())
...
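The test changes are the same renames applied wholesale: tf.random_uniform becomes tf.random.uniform, positional tf.argmax(x, 1) becomes keyword tf.argmax(input=x, axis=1), and the initializer moves under tf.compat.v1. A self-contained sketch of the updated idioms:

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

inputs = tf.random.uniform((5, 10))            # was tf.random_uniform
predictions = tf.argmax(input=inputs, axis=1)  # was tf.argmax(inputs, 1)
with tf.compat.v1.Session() as sess:
  sess.run(tf.compat.v1.global_variables_initializer())
  print(sess.run(predictions).shape)           # (5,)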
slim/nets/cifarnet.py

@@ -23,7 +23,9 @@ from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim

-trunc_normal = lambda stddev: tf.truncated_normal_initializer(stddev=stddev)
+# pylint: disable=g-long-lambda
+trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
+    stddev=stddev)


def cifarnet(images, num_classes=10, is_training=False,
@@ -61,7 +63,7 @@ def cifarnet(images, num_classes=10, is_training=False,
  """
  end_points = {}

-  with tf.variable_scope(scope, 'CifarNet', [images]):
+  with tf.compat.v1.variable_scope(scope, 'CifarNet', [images]):
    net = slim.conv2d(images, 64, [5, 5], scope='conv1')
    end_points['conv1'] = net
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
@@ -82,12 +84,14 @@ def cifarnet(images, num_classes=10, is_training=False,
    end_points['fc4'] = net
    if not num_classes:
      return net, end_points
-    logits = slim.fully_connected(net, num_classes,
-                                  biases_initializer=tf.zeros_initializer(),
-                                  weights_initializer=trunc_normal(1/192.0),
-                                  weights_regularizer=None,
-                                  activation_fn=None,
-                                  scope='logits')
+    logits = slim.fully_connected(
+        net,
+        num_classes,
+        biases_initializer=tf.compat.v1.zeros_initializer(),
+        weights_initializer=trunc_normal(1 / 192.0),
+        weights_regularizer=None,
+        activation_fn=None,
+        scope='logits')

    end_points['Logits'] = logits
    end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
@@ -107,11 +111,12 @@ def cifarnet_arg_scope(weight_decay=0.004):
  """
  with slim.arg_scope(
      [slim.conv2d],
-      weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
+      weights_initializer=tf.compat.v1.truncated_normal_initializer(
+          stddev=5e-2),
      activation_fn=tf.nn.relu):
    with slim.arg_scope(
        [slim.fully_connected],
-        biases_initializer=tf.constant_initializer(0.1),
+        biases_initializer=tf.compat.v1.constant_initializer(0.1),
        weights_initializer=trunc_normal(0.04),
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=tf.nn.relu) as sc:
...
slim/nets/cyclegan.py

@@ -61,7 +61,8 @@ def cyclegan_arg_scope(instance_norm_center=True,
      [layers.conv2d],
      normalizer_fn=layers.instance_norm,
      normalizer_params=instance_norm_params,
-      weights_initializer=tf.random_normal_initializer(0, weights_init_stddev),
+      weights_initializer=tf.compat.v1.random_normal_initializer(
+          0, weights_init_stddev),
      weights_regularizer=weights_regularizer) as sc:
    return sc
@@ -90,8 +91,8 @@ def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose',
  Raises:
    ValueError: if `method` is not recognized.
  """
-  with tf.variable_scope('upconv'):
+  with tf.compat.v1.variable_scope('upconv'):
-    net_shape = tf.shape(net)
+    net_shape = tf.shape(input=net)
    height = net_shape[1]
    width = net_shape[2]
@@ -101,15 +102,16 @@ def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose',
    spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])

    if method == 'nn_upsample_conv':
-      net = tf.image.resize_nearest_neighbor(
-          net, [stride[0] * height, stride[1] * width])
-      net = tf.pad(net, spatial_pad_1, pad_mode)
+      net = tf.image.resize(
+          net, [stride[0] * height, stride[1] * width],
+          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
+      net = tf.pad(tensor=net, paddings=spatial_pad_1, mode=pad_mode)
      net = layers.conv2d(net, num_outputs, kernel_size=[3, 3], padding='valid')
    elif method == 'bilinear_upsample_conv':
-      net = tf.image.resize_bilinear(
+      net = tf.compat.v1.image.resize_bilinear(
          net, [stride[0] * height, stride[1] * width],
          align_corners=align_corners)
-      net = tf.pad(net, spatial_pad_1, pad_mode)
+      net = tf.pad(tensor=net, paddings=spatial_pad_1, mode=pad_mode)
      net = layers.conv2d(net, num_outputs, kernel_size=[3, 3], padding='valid')
    elif method == 'conv2d_transpose':
      # This corrects 1 pixel offset for images with even width and height.
@@ -126,7 +128,7 @@ def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose',
def _dynamic_or_static_shape(tensor):
-  shape = tf.shape(tensor)
+  shape = tf.shape(input=tensor)
  static_shape = contrib_util.constant_value(shape)
  return static_shape if static_shape is not None else shape
@@ -204,40 +206,40 @@ def cyclegan_generator_resnet(images,
    ###########
    # Encoder #
    ###########
-    with tf.variable_scope('input'):
+    with tf.compat.v1.variable_scope('input'):
      # 7x7 input stage
-      net = tf.pad(images, spatial_pad_3, 'REFLECT')
+      net = tf.pad(tensor=images, paddings=spatial_pad_3, mode='REFLECT')
      net = layers.conv2d(net, num_filters, kernel_size=[7, 7], padding='VALID')
      end_points['encoder_0'] = net

-    with tf.variable_scope('encoder'):
+    with tf.compat.v1.variable_scope('encoder'):
      with contrib_framework.arg_scope([layers.conv2d],
                                       kernel_size=kernel_size,
                                       stride=2,
                                       activation_fn=tf.nn.relu,
                                       padding='VALID'):
-        net = tf.pad(net, paddings, 'REFLECT')
+        net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
        net = layers.conv2d(net, num_filters * 2)
        end_points['encoder_1'] = net
-        net = tf.pad(net, paddings, 'REFLECT')
+        net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
        net = layers.conv2d(net, num_filters * 4)
        end_points['encoder_2'] = net

    ###################
    # Residual Blocks #
    ###################
-    with tf.variable_scope('residual_blocks'):
+    with tf.compat.v1.variable_scope('residual_blocks'):
      with contrib_framework.arg_scope([layers.conv2d],
                                       kernel_size=kernel_size,
                                       stride=1,
                                       activation_fn=tf.nn.relu,
                                       padding='VALID'):
        for block_id in xrange(num_resnet_blocks):
-          with tf.variable_scope('block_{}'.format(block_id)):
+          with tf.compat.v1.variable_scope('block_{}'.format(block_id)):
-            res_net = tf.pad(net, paddings, 'REFLECT')
+            res_net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
            res_net = layers.conv2d(res_net, num_filters * 4)
-            res_net = tf.pad(res_net, paddings, 'REFLECT')
+            res_net = tf.pad(tensor=res_net, paddings=paddings, mode='REFLECT')
            res_net = layers.conv2d(res_net, num_filters * 4,
                                    activation_fn=None)
            net += res_net
@@ -247,23 +249,23 @@ def cyclegan_generator_resnet(images,
    ###########
    # Decoder #
    ###########
-    with tf.variable_scope('decoder'):
+    with tf.compat.v1.variable_scope('decoder'):
      with contrib_framework.arg_scope([layers.conv2d],
                                       kernel_size=kernel_size,
                                       stride=1,
                                       activation_fn=tf.nn.relu):
-        with tf.variable_scope('decoder1'):
+        with tf.compat.v1.variable_scope('decoder1'):
          net = upsample_fn(net, num_outputs=num_filters * 2, stride=[2, 2])
        end_points['decoder1'] = net

-        with tf.variable_scope('decoder2'):
+        with tf.compat.v1.variable_scope('decoder2'):
          net = upsample_fn(net, num_outputs=num_filters, stride=[2, 2])
        end_points['decoder2'] = net

-    with tf.variable_scope('output'):
+    with tf.compat.v1.variable_scope('output'):
-      net = tf.pad(net, spatial_pad_3, 'REFLECT')
+      net = tf.pad(tensor=net, paddings=spatial_pad_3, mode='REFLECT')
      logits = layers.conv2d(
          net,
          num_outputs, [7, 7],
...
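The upsample rewrite is worth noting: tf.image.resize_nearest_neighbor is gone in TF 2.x, so the diff moves to tf.image.resize with an explicit method, while the bilinear path stays on tf.compat.v1.image.resize_bilinear because align_corners has no direct TF2 counterpart. A sketch of the nearest-neighbor upsample-then-conv step under those assumptions:

import tensorflow as tf

def nn_upsample(net, stride=(2, 2)):
  # Dynamic H/W, so this also works for partially known shapes.
  h, w = tf.shape(net)[1], tf.shape(net)[2]
  return tf.image.resize(
      net, [stride[0] * h, stride[1] * w],
      method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)

x = tf.ones([1, 8, 8, 3])
y = nn_upsample(x)  # spatial dims doubled to 16x16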
slim/nets/cyclegan_test.py

@@ -31,7 +31,7 @@ class CycleganTest(tf.test.TestCase):
    img_batch = tf.zeros([2, 32, 32, 3])
    model_output, _ = cyclegan.cyclegan_generator_resnet(img_batch)
    with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
      sess.run(model_output)

  def _test_generator_graph_helper(self, shape):
@@ -50,13 +50,13 @@ class CycleganTest(tf.test.TestCase):
  def test_generator_unknown_batch_dim(self):
    """Check that generator can take unknown batch dimension inputs."""
-    img = tf.placeholder(tf.float32, shape=[None, 32, None, 3])
+    img = tf.compat.v1.placeholder(tf.float32, shape=[None, 32, None, 3])
    output_imgs, _ = cyclegan.cyclegan_generator_resnet(img)

    self.assertAllEqual([None, 32, None, 3], output_imgs.shape.as_list())

  def _input_and_output_same_shape_helper(self, kernel_size):
-    img_batch = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
+    img_batch = tf.compat.v1.placeholder(tf.float32, shape=[None, 32, 32, 3])
    output_img_batch, _ = cyclegan.cyclegan_generator_resnet(
        img_batch, kernel_size=kernel_size)
@@ -77,10 +77,9 @@ class CycleganTest(tf.test.TestCase):
  def _error_if_height_not_multiple_of_four_helper(self, height):
    self.assertRaisesRegexp(
-        ValueError,
-        'The input height must be a multiple of 4.',
+        ValueError, 'The input height must be a multiple of 4.',
        cyclegan.cyclegan_generator_resnet,
-        tf.placeholder(tf.float32, shape=[None, height, 32, 3]))
+        tf.compat.v1.placeholder(tf.float32, shape=[None, height, 32, 3]))

  def test_error_if_height_not_multiple_of_four_height29(self):
    self._error_if_height_not_multiple_of_four_helper(29)
@@ -93,10 +92,9 @@ class CycleganTest(tf.test.TestCase):
  def _error_if_width_not_multiple_of_four_helper(self, width):
    self.assertRaisesRegexp(
-        ValueError,
-        'The input width must be a multiple of 4.',
+        ValueError, 'The input width must be a multiple of 4.',
        cyclegan.cyclegan_generator_resnet,
-        tf.placeholder(tf.float32, shape=[None, 32, width, 3]))
+        tf.compat.v1.placeholder(tf.float32, shape=[None, 32, width, 3]))

  def test_error_if_width_not_multiple_of_four_width29(self):
    self._error_if_width_not_multiple_of_four_helper(29)
...
slim/nets/dcgan.py

@@ -82,7 +82,8 @@ def discriminator(inputs,
  inp_shape = inputs.get_shape().as_list()[1]

  end_points = {}
-  with tf.variable_scope(scope, values=[inputs], reuse=reuse) as scope:
+  with tf.compat.v1.variable_scope(
+      scope, values=[inputs], reuse=reuse) as scope:
    with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
      with slim.arg_scope([slim.conv2d],
                          stride=2,
@@ -156,7 +157,8 @@ def generator(inputs,
  end_points = {}
  num_layers = int(log(final_size, 2)) - 1
-  with tf.variable_scope(scope, values=[inputs], reuse=reuse) as scope:
+  with tf.compat.v1.variable_scope(
+      scope, values=[inputs], reuse=reuse) as scope:
    with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
      with slim.arg_scope([slim.conv2d_transpose],
                          normalizer_fn=normalizer_fn,
...
slim/nets/dcgan_test.py

@@ -27,21 +27,21 @@ from nets import dcgan
class DCGANTest(tf.test.TestCase):

  def test_generator_run(self):
-    tf.set_random_seed(1234)
+    tf.compat.v1.set_random_seed(1234)
-    noise = tf.random_normal([100, 64])
+    noise = tf.random.normal([100, 64])
    image, _ = dcgan.generator(noise)
    with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
      image.eval()

  def test_generator_graph(self):
-    tf.set_random_seed(1234)
+    tf.compat.v1.set_random_seed(1234)
    # Check graph construction for a number of image size/depths and batch
    # sizes.
    for i, batch_size in zip(xrange(3, 7), xrange(3, 8)):
-      tf.reset_default_graph()
+      tf.compat.v1.reset_default_graph()
      final_size = 2 ** i
-      noise = tf.random_normal([batch_size, 64])
+      noise = tf.random.normal([batch_size, 64])
      image, end_points = dcgan.generator(
          noise,
          depth=32,
@@ -71,19 +71,19 @@ class DCGANTest(tf.test.TestCase):
      dcgan.generator(correct_input, final_size=4)

  def test_discriminator_run(self):
-    image = tf.random_uniform([5, 32, 32, 3], -1, 1)
+    image = tf.random.uniform([5, 32, 32, 3], -1, 1)
    output, _ = dcgan.discriminator(image)
    with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
      output.eval()

  def test_discriminator_graph(self):
    # Check graph construction for a number of image size/depths and batch
    # sizes.
    for i, batch_size in zip(xrange(1, 6), xrange(3, 8)):
-      tf.reset_default_graph()
+      tf.compat.v1.reset_default_graph()
      img_w = 2 ** i
-      image = tf.random_uniform([batch_size, img_w, img_w, 3], -1, 1)
+      image = tf.random.uniform([batch_size, img_w, img_w, 3], -1, 1)
      output, end_points = dcgan.discriminator(
          image,
          depth=32)
@@ -103,7 +103,8 @@ class DCGANTest(tf.test.TestCase):
    with self.assertRaises(ValueError):
      dcgan.discriminator(wrong_dim_img)

-    spatially_undefined_shape = tf.placeholder(tf.float32, [5, 32, None, 3])
+    spatially_undefined_shape = tf.compat.v1.placeholder(
+        tf.float32, [5, 32, None, 3])
    with self.assertRaises(ValueError):
      dcgan.discriminator(spatially_undefined_shape)
...
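The last discriminator test relies on dcgan.discriminator rejecting inputs whose spatial dimensions are not statically known; tf.compat.v1.placeholder with a None dimension is how the test constructs such a tensor. The static-shape check a model function would make is a one-liner:

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

x = tf.compat.v1.placeholder(tf.float32, [5, 32, None, 3])
if not x.shape[1:3].is_fully_defined():
  # This branch runs: width is None at graph-construction time.
  raise ValueError('discriminator requires statically known H/W')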
slim/nets/i3d.py

@@ -31,7 +31,10 @@ from nets import i3d_utils
from nets import s3dg

slim = contrib_slim

-trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
+# pylint: disable=g-long-lambda
+trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
+    0.0, stddev)

conv3d_spatiotemporal = i3d_utils.conv3d_spatiotemporal
@@ -149,12 +152,12 @@ def i3d(inputs,
      activation.
  """
  # Final pooling and prediction
-  with tf.variable_scope(
+  with tf.compat.v1.variable_scope(
      scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
    with slim.arg_scope(
        [slim.batch_norm, slim.dropout], is_training=is_training):
      net, end_points = i3d_base(inputs, scope=scope)
-      with tf.variable_scope('Logits'):
+      with tf.compat.v1.variable_scope('Logits'):
        kernel_size = i3d_utils.reduced_kernel_size_3d(net, [2, 7, 7])
        net = slim.avg_pool3d(
            net, kernel_size, stride=1, scope='AvgPool_0a_7x7')
@@ -166,7 +169,7 @@ def i3d(inputs,
            normalizer_fn=None,
            scope='Conv2d_0c_1x1')
        # Temporal average pooling.
-        logits = tf.reduce_mean(logits, axis=1)
+        logits = tf.reduce_mean(input_tensor=logits, axis=1)
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
...
slim/nets/i3d_test.py

@@ -31,7 +31,7 @@ class I3DTest(tf.test.TestCase):
    height, width = 224, 224
    num_classes = 1000

-    inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
+    inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
    logits, end_points = i3d.i3d(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
@@ -45,7 +45,7 @@ class I3DTest(tf.test.TestCase):
    num_frames = 64
    height, width = 224, 224

-    inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
+    inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
    mixed_6c, end_points = i3d.i3d_base(inputs)
    self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
    self.assertListEqual(mixed_6c.get_shape().as_list(),
@@ -68,7 +68,7 @@ class I3DTest(tf.test.TestCase):
                 'Mixed_5c']
    for index, endpoint in enumerate(endpoints):
      with tf.Graph().as_default():
-        inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
+        inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
        out_tensor, end_points = i3d.i3d_base(
            inputs, final_endpoint=endpoint)
        self.assertTrue(out_tensor.op.name.startswith(
@@ -80,7 +80,7 @@ class I3DTest(tf.test.TestCase):
    num_frames = 64
    height, width = 224, 224

-    inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
+    inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
    _, end_points = i3d.i3d_base(inputs,
                                 final_endpoint='Mixed_5c')
    endpoints_shapes = {'Conv2d_1a_7x7': [5, 32, 112, 112, 64],
@@ -111,7 +111,7 @@ class I3DTest(tf.test.TestCase):
    num_frames = 64
    height, width = 112, 112

-    inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
+    inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
    mixed_5c, _ = i3d.i3d_base(inputs)
    self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
    self.assertListEqual(mixed_5c.get_shape().as_list(),
@@ -122,7 +122,7 @@ class I3DTest(tf.test.TestCase):
    num_frames = 10
    height, width = 224, 224

-    inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
+    inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
    mixed_5c, _ = i3d.i3d_base(inputs)
    self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
    self.assertListEqual(mixed_5c.get_shape().as_list(),
@@ -134,13 +134,13 @@ class I3DTest(tf.test.TestCase):
    height, width = 224, 224
    num_classes = 1000

-    eval_inputs = tf.random_uniform((batch_size, num_frames, height, width, 3))
+    eval_inputs = tf.random.uniform((batch_size, num_frames, height, width, 3))
    logits, _ = i3d.i3d(eval_inputs, num_classes,
                        is_training=False)
-    predictions = tf.argmax(logits, 1)
+    predictions = tf.argmax(input=logits, axis=1)

    with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (batch_size,))
...
slim/nets/i3d_utils.py

@@ -228,13 +228,13 @@ def inception_block_v1_3d(inputs,
  """
  use_gating = self_gating_fn is not None

-  with tf.variable_scope(scope):
+  with tf.compat.v1.variable_scope(scope):
-    with tf.variable_scope('Branch_0'):
+    with tf.compat.v1.variable_scope('Branch_0'):
      branch_0 = layers.conv3d(
          inputs, num_outputs_0_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
      if use_gating:
        branch_0 = self_gating_fn(branch_0, scope='Conv2d_0a_1x1')
-    with tf.variable_scope('Branch_1'):
+    with tf.compat.v1.variable_scope('Branch_1'):
      branch_1 = layers.conv3d(
          inputs, num_outputs_1_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
      branch_1 = conv3d_spatiotemporal(
@@ -242,7 +242,7 @@ def inception_block_v1_3d(inputs,
          scope='Conv2d_0b_3x3')
      if use_gating:
        branch_1 = self_gating_fn(branch_1, scope='Conv2d_0b_3x3')
-    with tf.variable_scope('Branch_2'):
+    with tf.compat.v1.variable_scope('Branch_2'):
      branch_2 = layers.conv3d(
          inputs, num_outputs_2_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
      branch_2 = conv3d_spatiotemporal(
@@ -250,7 +250,7 @@ def inception_block_v1_3d(inputs,
          scope='Conv2d_0b_3x3')
      if use_gating:
        branch_2 = self_gating_fn(branch_2, scope='Conv2d_0b_3x3')
-    with tf.variable_scope('Branch_3'):
+    with tf.compat.v1.variable_scope('Branch_3'):
      branch_3 = layers.max_pool3d(inputs, [3, 3, 3], scope='MaxPool_0a_3x3')
      branch_3 = layers.conv3d(
          branch_3, num_outputs_3_0b, [1, 1, 1], scope='Conv2d_0b_1x1')
...
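inception_block_v1_3d follows the classic Inception layout: parallel branches built under their own variable scopes, optionally passed through self_gating_fn, then concatenated on the channel axis (the concat itself falls outside this excerpt). A minimal 2-D sketch of the same branch-and-concat structure, with illustrative filter counts:

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

def mini_inception_block(inputs):
  with tf.compat.v1.variable_scope('Branch_0'):
    b0 = tf.compat.v1.layers.conv2d(inputs, 32, 1, padding='same')
  with tf.compat.v1.variable_scope('Branch_1'):
    b1 = tf.compat.v1.layers.conv2d(inputs, 32, 3, padding='same')
  return tf.concat([b0, b1], axis=3)  # channels: 32 + 32 = 64

x = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 16])
y = mini_inception_block(x)  # static shape: [None, 28, 28, 64]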
slim/nets/inception_resnet_v2.py

@@ -33,13 +33,13 @@ slim = contrib_slim

def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 35x35 resnet block."""
-  with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
+  with tf.compat.v1.variable_scope(scope, 'Block35', [net], reuse=reuse):
-    with tf.variable_scope('Branch_0'):
+    with tf.compat.v1.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
-    with tf.variable_scope('Branch_1'):
+    with tf.compat.v1.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
-    with tf.variable_scope('Branch_2'):
+    with tf.compat.v1.variable_scope('Branch_2'):
      tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
      tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
@@ -59,10 +59,10 @@ def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):

def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 17x17 resnet block."""
-  with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
+  with tf.compat.v1.variable_scope(scope, 'Block17', [net], reuse=reuse):
-    with tf.variable_scope('Branch_0'):
+    with tf.compat.v1.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
-    with tf.variable_scope('Branch_1'):
+    with tf.compat.v1.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
                                  scope='Conv2d_0b_1x7')
@@ -85,10 +85,10 @@ def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):

def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 8x8 resnet block."""
-  with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
+  with tf.compat.v1.variable_scope(scope, 'Block8', [net], reuse=reuse):
-    with tf.variable_scope('Branch_0'):
+    with tf.compat.v1.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
-    with tf.variable_scope('Branch_1'):
+    with tf.compat.v1.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                  scope='Conv2d_0b_1x3')
@@ -155,7 +155,7 @@ def inception_resnet_v2_base(inputs,
        end_points[name] = net
      return name == final_endpoint

-  with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]):
+  with tf.compat.v1.variable_scope(scope, 'InceptionResnetV2', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):
      # 149 x 149 x 32
@@ -188,20 +188,20 @@ def inception_resnet_v2_base(inputs,
      if add_and_check_final('MaxPool_5a_3x3', net): return net, end_points

      # 35 x 35 x 320
-      with tf.variable_scope('Mixed_5b'):
+      with tf.compat.v1.variable_scope('Mixed_5b'):
-        with tf.variable_scope('Branch_0'):
+        with tf.compat.v1.variable_scope('Branch_0'):
          tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
          tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
          tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
                                      scope='Conv2d_0b_5x5')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
          tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
          tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
                                      scope='Conv2d_0b_3x3')
          tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
                                      scope='Conv2d_0c_3x3')
-        with tf.variable_scope('Branch_3'):
+        with tf.compat.v1.variable_scope('Branch_3'):
          tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
                                       scope='AvgPool_0a_3x3')
          tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
@@ -218,12 +218,12 @@ def inception_resnet_v2_base(inputs,
      # 33 x 33 x 1088 if output_stride == 16
      use_atrous = output_stride == 8

-      with tf.variable_scope('Mixed_6a'):
+      with tf.compat.v1.variable_scope('Mixed_6a'):
-        with tf.variable_scope('Branch_0'):
+        with tf.compat.v1.variable_scope('Branch_0'):
          tower_conv = slim.conv2d(net, 384, 3, stride=1 if use_atrous else 2,
                                   padding=padding,
                                   scope='Conv2d_1a_3x3')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
          tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
                                      scope='Conv2d_0b_3x3')
@@ -231,7 +231,7 @@ def inception_resnet_v2_base(inputs,
                                      stride=1 if use_atrous else 2,
                                      padding=padding,
                                      scope='Conv2d_1a_3x3')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
          tower_pool = slim.max_pool2d(net, 3, stride=1 if use_atrous else 2,
                                       padding=padding,
                                       scope='MaxPool_1a_3x3')
@@ -251,25 +251,25 @@ def inception_resnet_v2_base(inputs,
                         'PreAuxlogits end_point for now.')

      # 8 x 8 x 2080
-      with tf.variable_scope('Mixed_7a'):
+      with tf.compat.v1.variable_scope('Mixed_7a'):
-        with tf.variable_scope('Branch_0'):
+        with tf.compat.v1.variable_scope('Branch_0'):
          tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
                                     padding=padding,
                                     scope='Conv2d_1a_3x3')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
          tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
                                      padding=padding,
                                      scope='Conv2d_1a_3x3')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
          tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
                                      scope='Conv2d_0b_3x3')
          tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
                                      padding=padding,
                                      scope='Conv2d_1a_3x3')
-        with tf.variable_scope('Branch_3'):
+        with tf.compat.v1.variable_scope('Branch_3'):
          tower_pool = slim.max_pool2d(net, 3, stride=2,
                                       padding=padding,
                                       scope='MaxPool_1a_3x3')
@@ -320,8 +320,8 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
  """
  end_points = {}

-  with tf.variable_scope(scope, 'InceptionResnetV2', [inputs],
-                         reuse=reuse) as scope:
+  with tf.compat.v1.variable_scope(
+      scope, 'InceptionResnetV2', [inputs], reuse=reuse) as scope:
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
@@ -329,7 +329,7 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
                                        activation_fn=activation_fn)

      if create_aux_logits and num_classes:
-        with tf.variable_scope('AuxLogits'):
+        with tf.compat.v1.variable_scope('AuxLogits'):
          aux = end_points['PreAuxLogits']
          aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID',
                                scope='Conv2d_1a_3x3')
@@ -341,7 +341,7 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
                                   scope='Logits')
        end_points['AuxLogits'] = aux

-      with tf.variable_scope('Logits'):
+      with tf.compat.v1.variable_scope('Logits'):
        # TODO(sguada,arnoegw): Consider adding a parameter global_pool which
        # can be set to False to disable pooling here (as in resnet_*()).
        kernel_size = net.get_shape()[1:3]
@@ -349,7 +349,8 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
          net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                scope='AvgPool_1a_8x8')
        else:
-          net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
+          net = tf.reduce_mean(
+              input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
        end_points['global_pool'] = net
        if not num_classes:
          return net, end_points
@@ -371,7 +372,7 @@ def inception_resnet_v2_arg_scope(
    batch_norm_decay=0.9997,
    batch_norm_epsilon=0.001,
    activation_fn=tf.nn.relu,
-    batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
+    batch_norm_updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS,
    batch_norm_scale=False):
  """Returns the scope with the default parameters for inception_resnet_v2.
...
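One behavioral note on the arg_scope change: batch-norm update ops registered under tf.compat.v1.GraphKeys.UPDATE_OPS do not run on their own in TF1-style training; they must be attached to the train op. The standard pattern, sketched with a toy loss:

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

x = tf.compat.v1.placeholder(tf.float32, [None, 8])
net = tf.compat.v1.layers.batch_normalization(x, training=True)
loss = tf.reduce_mean(tf.square(net))

# Couple the moving-average updates to the training step, or the
# batch-norm statistics never advance.
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
  train_op = tf.compat.v1.train.GradientDescentOptimizer(0.01).minimize(loss)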
...@@ -30,7 +30,7 @@ class InceptionTest(tf.test.TestCase): ...@@ -30,7 +30,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299 height, width = 299, 299
num_classes = 1000 num_classes = 1000
with self.test_session(): with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
logits, endpoints = inception.inception_resnet_v2(inputs, num_classes) logits, endpoints = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue('AuxLogits' in endpoints) self.assertTrue('AuxLogits' in endpoints)
auxlogits = endpoints['AuxLogits'] auxlogits = endpoints['AuxLogits']
...@@ -47,7 +47,7 @@ class InceptionTest(tf.test.TestCase): ...@@ -47,7 +47,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299 height, width = 299, 299
num_classes = 1000 num_classes = 1000
with self.test_session(): with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
logits, endpoints = inception.inception_resnet_v2(inputs, num_classes, logits, endpoints = inception.inception_resnet_v2(inputs, num_classes,
create_aux_logits=False) create_aux_logits=False)
self.assertTrue('AuxLogits' not in endpoints) self.assertTrue('AuxLogits' not in endpoints)
@@ -60,7 +60,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299 height, width = 299, 299
num_classes = None num_classes = None
with self.test_session(): with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
net, endpoints = inception.inception_resnet_v2(inputs, num_classes) net, endpoints = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue('AuxLogits' not in endpoints) self.assertTrue('AuxLogits' not in endpoints)
self.assertTrue('Logits' not in endpoints) self.assertTrue('Logits' not in endpoints)
@@ -73,7 +73,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299 height, width = 299, 299
num_classes = 1000 num_classes = 1000
with self.test_session(): with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2(inputs, num_classes) _, end_points = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue('Logits' in end_points) self.assertTrue('Logits' in end_points)
logits = end_points['Logits'] logits = end_points['Logits']
@@ -91,7 +91,7 @@ class InceptionTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 299, 299 height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = inception.inception_resnet_v2_base(inputs) net, end_points = inception.inception_resnet_v2_base(inputs)
self.assertTrue(net.op.name.startswith('InceptionResnetV2/Conv2d_7b_1x1')) self.assertTrue(net.op.name.startswith('InceptionResnetV2/Conv2d_7b_1x1'))
self.assertListEqual(net.get_shape().as_list(), self.assertListEqual(net.get_shape().as_list(),
@@ -111,7 +111,7 @@ class InceptionTest(tf.test.TestCase):
'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1'] 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']
for index, endpoint in enumerate(endpoints): for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default(): with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_resnet_v2_base( out_tensor, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint=endpoint) inputs, final_endpoint=endpoint)
if endpoint != 'PreAuxLogits': if endpoint != 'PreAuxLogits':
@@ -123,7 +123,7 @@ class InceptionTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 299, 299 height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2_base( _, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint='PreAuxLogits') inputs, final_endpoint='PreAuxLogits')
endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32], endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32],
@@ -149,7 +149,7 @@ class InceptionTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 299, 299 height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2_base( _, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint='PreAuxLogits', align_feature_maps=True) inputs, final_endpoint='PreAuxLogits', align_feature_maps=True)
endpoints_shapes = {'Conv2d_1a_3x3': [5, 150, 150, 32], endpoints_shapes = {'Conv2d_1a_3x3': [5, 150, 150, 32],
@@ -175,7 +175,7 @@ class InceptionTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 299, 299 height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2_base( _, end_points = inception.inception_resnet_v2_base(
inputs, final_endpoint='PreAuxLogits', output_stride=8) inputs, final_endpoint='PreAuxLogits', output_stride=8)
endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32], endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32],
@@ -202,15 +202,17 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299 height, width = 299, 299
num_classes = 1000 num_classes = 1000
with self.test_session(): with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
# Force all Variables to reside on the device. # Force all Variables to reside on the device.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'): with tf.compat.v1.variable_scope('on_cpu'), tf.device('/cpu:0'):
inception.inception_resnet_v2(inputs, num_classes) inception.inception_resnet_v2(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'): with tf.compat.v1.variable_scope('on_gpu'), tf.device('/gpu:0'):
inception.inception_resnet_v2(inputs, num_classes) inception.inception_resnet_v2(inputs, num_classes)
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'): for v in tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0') self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'): for v in tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0') self.assertDeviceEqual(v.device, '/gpu:0')
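Note: tf.get_collection and tf.GraphKeys.GLOBAL_VARIABLES now need the compat.v1 prefix; the scope argument is still a regex matched against variable names, which is how this test separates on_cpu from on_gpu variables. A minimal sketch (hypothetical variable):

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
        tf.get_variable('w', shape=[3])
    cpu_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu')
    print([v.name for v in cpu_vars])  # ['on_cpu/w:0']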
def testHalfSizeImages(self): def testHalfSizeImages(self):
@@ -218,7 +220,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 150, 150 height, width = 150, 150
num_classes = 1000 num_classes = 1000
with self.test_session(): with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_resnet_v2(inputs, num_classes) logits, end_points = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits')) self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(), self.assertListEqual(logits.get_shape().as_list(),
@@ -232,7 +234,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 330, 400 height, width = 330, 400
num_classes = 1000 num_classes = 1000
with self.test_session(): with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_resnet_v2(inputs, num_classes) logits, end_points = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits')) self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(), self.assertListEqual(logits.get_shape().as_list(),
@@ -246,15 +248,15 @@ class InceptionTest(tf.test.TestCase):
height, width = 330, 400 height, width = 330, 400
num_classes = 1000 num_classes = 1000
with self.test_session() as sess: with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (batch_size, None, None, 3)) inputs = tf.compat.v1.placeholder(tf.float32, (batch_size, None, None, 3))
logits, end_points = inception.inception_resnet_v2( logits, end_points = inception.inception_resnet_v2(
inputs, num_classes, create_aux_logits=False) inputs, num_classes, create_aux_logits=False)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits')) self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(), self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes]) [batch_size, num_classes])
pre_pool = end_points['Conv2d_7b_1x1'] pre_pool = end_points['Conv2d_7b_1x1']
images = tf.random_uniform((batch_size, height, width, 3)) images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
logits_out, pre_pool_out = sess.run([logits, pre_pool], logits_out, pre_pool_out = sess.run([logits, pre_pool],
{inputs: images.eval()}) {inputs: images.eval()})
self.assertTupleEqual(logits_out.shape, (batch_size, num_classes)) self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))
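Note: tf.placeholder likewise moved under tf.compat.v1, and leaving height/width as None is what lets one graph evaluate several input resolutions, as this test does. A runnable sketch with a toy op in place of the network:

    import numpy as np
    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    inputs = tf.placeholder(tf.float32, (1, None, None, 3))  # dynamic H/W
    mean = tf.reduce_mean(inputs, axis=[1, 2])  # valid for any spatial size
    with tf.Session() as sess:
        out = sess.run(mean, {inputs: np.ones((1, 330, 400, 3), np.float32)})
        print(out.shape)  # (1, 3)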
@@ -265,13 +267,13 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299 height, width = 299, 299
num_classes = 1000 num_classes = 1000
with self.test_session() as sess: with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3)) inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_resnet_v2(inputs, num_classes) logits, _ = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits')) self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(), self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes]) [None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3)) images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()}) output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes)) self.assertEquals(output.shape, (batch_size, num_classes))
@@ -280,12 +282,12 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299 height, width = 299, 299
num_classes = 1000 num_classes = 1000
with self.test_session() as sess: with self.test_session() as sess:
eval_inputs = tf.random_uniform((batch_size, height, width, 3)) eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = inception.inception_resnet_v2(eval_inputs, logits, _ = inception.inception_resnet_v2(eval_inputs,
num_classes, num_classes,
is_training=False) is_training=False)
predictions = tf.argmax(logits, 1) predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions) output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,)) self.assertEquals(output.shape, (batch_size,))
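Note: the upgrade rewrites positional tf.argmax calls to keyword form (input=, axis=); axis=1 picks the highest-scoring class per row. Toy logits (assumes TF 2.x eager):

    import tensorflow as tf

    logits = tf.constant([[0.1, 2.0, -1.0],
                          [3.0, 0.0, 0.5]])  # hypothetical (batch=2, classes=3)
    predictions = tf.argmax(input=logits, axis=1)
    print(predictions.numpy())  # [1 0]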
@@ -295,39 +297,40 @@ class InceptionTest(tf.test.TestCase):
height, width = 150, 150 height, width = 150, 150
num_classes = 1000 num_classes = 1000
with self.test_session() as sess: with self.test_session() as sess:
train_inputs = tf.random_uniform((train_batch_size, height, width, 3)) train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
inception.inception_resnet_v2(train_inputs, num_classes) inception.inception_resnet_v2(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3)) eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_resnet_v2(eval_inputs, logits, _ = inception.inception_resnet_v2(eval_inputs,
num_classes, num_classes,
is_training=False, is_training=False,
reuse=True) reuse=True)
predictions = tf.argmax(logits, 1) predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions) output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,)) self.assertEquals(output.shape, (eval_batch_size,))
def testNoBatchNormScaleByDefault(self): def testNoBatchNormScaleByDefault(self):
height, width = 299, 299 height, width = 299, 299
num_classes = 1000 num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3)) inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with contrib_slim.arg_scope(inception.inception_resnet_v2_arg_scope()): with contrib_slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
inception.inception_resnet_v2(inputs, num_classes, is_training=False) inception.inception_resnet_v2(inputs, num_classes, is_training=False)
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), []) self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self): def testBatchNormScale(self):
height, width = 299, 299 height, width = 299, 299
num_classes = 1000 num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3)) inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with contrib_slim.arg_scope( with contrib_slim.arg_scope(
inception.inception_resnet_v2_arg_scope(batch_norm_scale=True)): inception.inception_resnet_v2_arg_scope(batch_norm_scale=True)):
inception.inception_resnet_v2(inputs, num_classes, is_training=False) inception.inception_resnet_v2(inputs, num_classes, is_training=False)
gamma_names = set( gamma_names = set(
v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$')) v.op.name
for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0) self.assertGreater(len(gamma_names), 0)
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'): for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names) self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
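Note: tf.compat.v1.global_variables takes an optional scope pattern, applied with re.match against each variable's name; that is what lets these tests assert that gamma variables exist only when batch_norm_scale=True. A minimal sketch (hypothetical scope names):

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    with tf.variable_scope('InceptionV1'):
        with tf.variable_scope('BatchNorm'):
            tf.get_variable('gamma', shape=[8])
            tf.get_variable('moving_mean', shape=[8])
    gammas = tf.global_variables('.*/BatchNorm/gamma:0$')
    print([v.op.name for v in gammas])  # ['InceptionV1/BatchNorm/gamma']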
...
@@ -30,13 +30,14 @@ from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim slim = contrib_slim
def inception_arg_scope(weight_decay=0.00004, def inception_arg_scope(
use_batch_norm=True, weight_decay=0.00004,
batch_norm_decay=0.9997, use_batch_norm=True,
batch_norm_epsilon=0.001, batch_norm_decay=0.9997,
activation_fn=tf.nn.relu, batch_norm_epsilon=0.001,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS, activation_fn=tf.nn.relu,
batch_norm_scale=False): batch_norm_updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS,
batch_norm_scale=False):
"""Defines the default arg scope for inception models. """Defines the default arg scope for inception models.
Args: Args:
...
@@ -24,7 +24,10 @@ from tensorflow.contrib import slim as contrib_slim
from nets import inception_utils from nets import inception_utils
slim = contrib_slim slim = contrib_slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
0.0, stddev)
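Note: tf.truncated_normal_initializer is v1-only in TF 2.x, hence the compat spelling and the pylint-suppressed long lambda; it redraws any sample more than two standard deviations from the mean. The lambda in use (hypothetical variable, graph mode):

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
    weights = tf.get_variable('weights', shape=[3, 3, 64, 96],
                              initializer=trunc_normal(0.09))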
def inception_v1_base(inputs, def inception_v1_base(inputs,
@@ -59,7 +62,7 @@ def inception_v1_base(inputs,
ValueError: if final_endpoint is not set to one of the predefined values. ValueError: if final_endpoint is not set to one of the predefined values.
""" """
end_points = {} end_points = {}
with tf.variable_scope(scope, 'InceptionV1', [inputs]): with tf.compat.v1.variable_scope(scope, 'InceptionV1', [inputs]):
with slim.arg_scope( with slim.arg_scope(
[slim.conv2d, slim.fully_connected], [slim.conv2d, slim.fully_connected],
weights_initializer=trunc_normal(0.01)): weights_initializer=trunc_normal(0.01)):
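Note: the bulk of this commit is the mechanical tf.variable_scope -> tf.compat.v1.variable_scope rewrite; the symbol moved, the semantics did not, and nested scopes still concatenate into variable names. A sketch using scope names from this file:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    with tf.variable_scope('Mixed_3b'):
        with tf.variable_scope('Branch_0'):
            w = tf.get_variable('w', shape=[1])
    print(w.op.name)  # Mixed_3b/Branch_0/w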
@@ -94,16 +97,16 @@ def inception_v1_base(inputs,
return net, end_points return net, end_points
end_point = 'Mixed_3b' end_point = 'Mixed_3b'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1') branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat( net = tf.concat(
@@ -112,16 +115,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3c' end_point = 'Mixed_3c'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1') branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat( net = tf.concat(
@@ -135,16 +138,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4b' end_point = 'Mixed_4b'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1') branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat( net = tf.concat(
@@ -153,16 +156,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4c' end_point = 'Mixed_4c'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1') branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat( net = tf.concat(
@@ -171,16 +174,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4d' end_point = 'Mixed_4d'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1') branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat( net = tf.concat(
@@ -189,16 +192,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4e' end_point = 'Mixed_4e'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1') branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat( net = tf.concat(
@@ -207,16 +210,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4f' end_point = 'Mixed_4f'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1') branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat( net = tf.concat(
@@ -230,16 +233,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5b' end_point = 'Mixed_5b'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3') branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1') branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat( net = tf.concat(
@@ -248,16 +251,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5c' end_point = 'Mixed_5c'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1') branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat( net = tf.concat(
@@ -313,14 +316,16 @@ def inception_v1(inputs,
activation. activation.
""" """
# Final pooling and prediction # Final pooling and prediction
with tf.variable_scope(scope, 'InceptionV1', [inputs], reuse=reuse) as scope: with tf.compat.v1.variable_scope(
scope, 'InceptionV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout], with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training): is_training=is_training):
net, end_points = inception_v1_base(inputs, scope=scope) net, end_points = inception_v1_base(inputs, scope=scope)
with tf.variable_scope('Logits'): with tf.compat.v1.variable_scope('Logits'):
if global_pool: if global_pool:
# Global average pooling. # Global average pooling.
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool') net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net end_points['global_pool'] = net
else: else:
# Pooling with a fixed kernel size. # Pooling with a fixed kernel size.
...
@@ -34,7 +34,7 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v1(inputs, num_classes) logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith( self.assertTrue(logits.op.name.startswith(
'InceptionV1/Logits/SpatialSqueeze')) 'InceptionV1/Logits/SpatialSqueeze'))
@@ -49,7 +49,7 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = None num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v1(inputs, num_classes) net, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV1/Logits/AvgPool')) self.assertTrue(net.op.name.startswith('InceptionV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024]) self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
@@ -60,7 +60,7 @@ class InceptionV1Test(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
mixed_6c, end_points = inception.inception_v1_base(inputs) mixed_6c, end_points = inception.inception_v1_base(inputs)
self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c')) self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_6c.get_shape().as_list(), self.assertListEqual(mixed_6c.get_shape().as_list(),
@@ -82,7 +82,7 @@ class InceptionV1Test(tf.test.TestCase):
'Mixed_5c'] 'Mixed_5c']
for index, endpoint in enumerate(endpoints): for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default(): with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v1_base( out_tensor, end_points = inception.inception_v1_base(
inputs, final_endpoint=endpoint) inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith( self.assertTrue(out_tensor.op.name.startswith(
@@ -93,7 +93,7 @@ class InceptionV1Test(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v1_base(inputs, _, end_points = inception.inception_v1_base(inputs,
final_endpoint='Mixed_5c') final_endpoint='Mixed_5c')
endpoints_shapes = { endpoints_shapes = {
@@ -125,7 +125,7 @@ class InceptionV1Test(tf.test.TestCase):
def testModelHasExpectedNumberOfParameters(self): def testModelHasExpectedNumberOfParameters(self):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()): with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1_base(inputs) inception.inception_v1_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars( total_params, _ = slim.model_analyzer.analyze_vars(
@@ -136,7 +136,7 @@ class InceptionV1Test(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 112, 112 height, width = 112, 112
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
mixed_5c, _ = inception.inception_v1_base(inputs) mixed_5c, _ = inception.inception_v1_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c')) self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(), self.assertListEqual(mixed_5c.get_shape().as_list(),
@@ -147,7 +147,7 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 28, 28 height, width = 28, 28
channels = 192 channels = 192
inputs = tf.random_uniform((batch_size, height, width, channels)) inputs = tf.random.uniform((batch_size, height, width, channels))
_, end_points = inception.inception_v1_base( _, end_points = inception.inception_v1_base(
inputs, include_root_block=False) inputs, include_root_block=False)
endpoints_shapes = { endpoints_shapes = {
@@ -172,31 +172,33 @@ class InceptionV1Test(tf.test.TestCase):
expected_shape) expected_shape)
def testUnknownImageShape(self): def testUnknownImageShape(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
batch_size = 2 batch_size = 2
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3)) input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess: with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3)) inputs = tf.compat.v1.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes) logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits')) self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(), self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes]) [batch_size, num_classes])
pre_pool = end_points['Mixed_5c'] pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np} feed_dict = {inputs: input_np}
tf.global_variables_initializer().run() tf.compat.v1.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024]) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
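Note: tf.reset_default_graph also moved under tf.compat.v1; the tests call it so each graph-building test starts from an empty default graph. Sketch:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    tf.get_variable('w', shape=[1])
    tf.reset_default_graph()           # discards the graph and its variables
    print(len(tf.global_variables()))  # 0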
def testGlobalPoolUnknownImageShape(self): def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
batch_size = 1 batch_size = 1
height, width = 250, 300 height, width = 250, 300
num_classes = 1000 num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3)) input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess: with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3)) inputs = tf.compat.v1.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes, logits, end_points = inception.inception_v1(inputs, num_classes,
global_pool=True) global_pool=True)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits')) self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
@@ -204,7 +206,7 @@ class InceptionV1Test(tf.test.TestCase):
[batch_size, num_classes]) [batch_size, num_classes])
pre_pool = end_points['Mixed_5c'] pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np} feed_dict = {inputs: input_np}
tf.global_variables_initializer().run() tf.compat.v1.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024]) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
@@ -213,15 +215,15 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3)) inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v1(inputs, num_classes) logits, _ = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits')) self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(), self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes]) [None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3)) images = tf.random.uniform((batch_size, height, width, 3))
with self.test_session() as sess: with self.test_session() as sess:
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()}) output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes)) self.assertEquals(output.shape, (batch_size, num_classes))
@@ -230,13 +232,13 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3)) eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes, logits, _ = inception.inception_v1(eval_inputs, num_classes,
is_training=False) is_training=False)
predictions = tf.argmax(logits, 1) predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess: with self.test_session() as sess:
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions) output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,)) self.assertEquals(output.shape, (batch_size,))
@@ -246,50 +248,51 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3)) train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
inception.inception_v1(train_inputs, num_classes) inception.inception_v1(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3)) eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True) logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
predictions = tf.argmax(logits, 1) predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess: with self.test_session() as sess:
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions) output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,)) self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self): def testLogitsNotSqueezed(self):
num_classes = 25 num_classes = 25
images = tf.random_uniform([1, 224, 224, 3]) images = tf.random.uniform([1, 224, 224, 3])
logits, _ = inception.inception_v1(images, logits, _ = inception.inception_v1(images,
num_classes=num_classes, num_classes=num_classes,
spatial_squeeze=False) spatial_squeeze=False)
with self.test_session() as sess: with self.test_session() as sess:
tf.global_variables_initializer().run() tf.compat.v1.global_variables_initializer().run()
logits_out = sess.run(logits) logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
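Note: with spatial_squeeze=False the logits keep their 1x1 spatial axes, as this test asserts; the squeeze the network would otherwise apply is a plain tf.squeeze over axes 1 and 2 (the SpatialSqueeze op name seen in the tests above). Sketch (assumes TF 2.x eager):

    import tensorflow as tf

    logits = tf.ones([1, 1, 1, 25])  # unsqueezed logits, num_classes=25
    squeezed = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
    print(squeezed.shape)  # (1, 25)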
def testNoBatchNormScaleByDefault(self): def testNoBatchNormScaleByDefault(self):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3)) inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()): with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1(inputs, num_classes, is_training=False) inception.inception_v1(inputs, num_classes, is_training=False)
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), []) self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self): def testBatchNormScale(self):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3)) inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope( with slim.arg_scope(
inception.inception_v1_arg_scope(batch_norm_scale=True)): inception.inception_v1_arg_scope(batch_norm_scale=True)):
inception.inception_v1(inputs, num_classes, is_training=False) inception.inception_v1(inputs, num_classes, is_training=False)
gamma_names = set( gamma_names = set(
v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$')) v.op.name
for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0) self.assertGreater(len(gamma_names), 0)
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'): for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names) self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
...
@@ -24,7 +24,10 @@ from tensorflow.contrib import slim as contrib_slim
from nets import inception_utils from nets import inception_utils
slim = contrib_slim slim = contrib_slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
0.0, stddev)
def inception_v2_base(inputs, def inception_v2_base(inputs,
@@ -92,7 +95,7 @@ def inception_v2_base(inputs,
) )
concat_dim = 3 if data_format == 'NHWC' else 1 concat_dim = 3 if data_format == 'NHWC' else 1
with tf.variable_scope(scope, 'InceptionV2', [inputs]): with tf.compat.v1.variable_scope(scope, 'InceptionV2', [inputs]):
with slim.arg_scope( with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d], [slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, stride=1,
@@ -167,17 +170,17 @@ def inception_v2_base(inputs,
# 28 x 28 x 192 # 28 x 28 x 192
# Inception module. # Inception module.
end_point = 'Mixed_3b' end_point = 'Mixed_3b'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
net, depth(64), [1, 1], net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1') scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [3, 3], branch_1 = slim.conv2d(branch_1, depth(64), [3, 3],
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d( branch_2 = slim.conv2d(
net, depth(64), [1, 1], net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
@@ -186,7 +189,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3') scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d( branch_3 = slim.conv2d(
branch_3, depth(32), [1, 1], branch_3, depth(32), [1, 1],
@@ -198,17 +201,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points if end_point == final_endpoint: return net, end_points
# 28 x 28 x 256 # 28 x 28 x 256
end_point = 'Mixed_3c' end_point = 'Mixed_3c'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
net, depth(64), [1, 1], net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1') scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d( branch_2 = slim.conv2d(
net, depth(64), [1, 1], net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
@@ -217,7 +220,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3') scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d( branch_3 = slim.conv2d(
branch_3, depth(64), [1, 1], branch_3, depth(64), [1, 1],
@@ -229,15 +232,15 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points if end_point == final_endpoint: return net, end_points
# 28 x 28 x 320 # 28 x 28 x 320
end_point = 'Mixed_4a' end_point = 'Mixed_4a'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d( branch_0 = slim.conv2d(
net, depth(128), [1, 1], net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1') scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2, branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2,
scope='Conv2d_1a_3x3') scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
net, depth(64), [1, 1], net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
@@ -246,7 +249,7 @@ def inception_v2_base(inputs,
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3') branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d( branch_2 = slim.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3') net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2]) net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2])
@@ -254,17 +257,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576 # 14 x 14 x 576
end_point = 'Mixed_4b' end_point = 'Mixed_4b'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
net, depth(64), [1, 1], net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1') scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d( branch_2 = slim.conv2d(
net, depth(96), [1, 1], net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
@@ -273,7 +276,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0c_3x3') scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d( branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1], branch_3, depth(128), [1, 1],
@@ -285,17 +288,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576 # 14 x 14 x 576
end_point = 'Mixed_4c' end_point = 'Mixed_4c'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
net, depth(96), [1, 1], net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1') scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(128), [3, 3], branch_1 = slim.conv2d(branch_1, depth(128), [3, 3],
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d( branch_2 = slim.conv2d(
net, depth(96), [1, 1], net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
@@ -304,7 +307,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0c_3x3') scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d( branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1], branch_3, depth(128), [1, 1],
...@@ -316,17 +319,17 @@ def inception_v2_base(inputs, ...@@ -316,17 +319,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576 # 14 x 14 x 576
end_point = 'Mixed_4d' end_point = 'Mixed_4d'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
net, depth(128), [1, 1], net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1') scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [3, 3], branch_1 = slim.conv2d(branch_1, depth(160), [3, 3],
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d( branch_2 = slim.conv2d(
net, depth(128), [1, 1], net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
...@@ -335,7 +338,7 @@ def inception_v2_base(inputs, ...@@ -335,7 +338,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
scope='Conv2d_0c_3x3') scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d( branch_3 = slim.conv2d(
branch_3, depth(96), [1, 1], branch_3, depth(96), [1, 1],
...@@ -347,17 +350,17 @@ def inception_v2_base(inputs, ...@@ -347,17 +350,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576 # 14 x 14 x 576
end_point = 'Mixed_4e' end_point = 'Mixed_4e'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
net, depth(128), [1, 1], net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1') scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], branch_1 = slim.conv2d(branch_1, depth(192), [3, 3],
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d( branch_2 = slim.conv2d(
net, depth(160), [1, 1], net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
...@@ -366,7 +369,7 @@ def inception_v2_base(inputs, ...@@ -366,7 +369,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
scope='Conv2d_0c_3x3') scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d( branch_3 = slim.conv2d(
branch_3, depth(96), [1, 1], branch_3, depth(96), [1, 1],
...@@ -378,15 +381,15 @@ def inception_v2_base(inputs, ...@@ -378,15 +381,15 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576 # 14 x 14 x 576
end_point = 'Mixed_5a' end_point = 'Mixed_5a'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d( branch_0 = slim.conv2d(
net, depth(128), [1, 1], net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1') scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2, branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
scope='Conv2d_1a_3x3') scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
net, depth(192), [1, 1], net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
...@@ -395,7 +398,7 @@ def inception_v2_base(inputs, ...@@ -395,7 +398,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2, branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
scope='Conv2d_1a_3x3') scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
scope='MaxPool_1a_3x3') scope='MaxPool_1a_3x3')
net = tf.concat( net = tf.concat(
...@@ -404,17 +407,17 @@ def inception_v2_base(inputs, ...@@ -404,17 +407,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points if end_point == final_endpoint: return net, end_points
# 7 x 7 x 1024 # 7 x 7 x 1024
end_point = 'Mixed_5b' end_point = 'Mixed_5b'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
net, depth(192), [1, 1], net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1') scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d( branch_2 = slim.conv2d(
net, depth(160), [1, 1], net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
...@@ -423,7 +426,7 @@ def inception_v2_base(inputs, ...@@ -423,7 +426,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3') scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d( branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1], branch_3, depth(128), [1, 1],
...@@ -435,17 +438,17 @@ def inception_v2_base(inputs, ...@@ -435,17 +438,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points if end_point == final_endpoint: return net, end_points
# 7 x 7 x 1024 # 7 x 7 x 1024
end_point = 'Mixed_5c' end_point = 'Mixed_5c'
with tf.variable_scope(end_point): with tf.compat.v1.variable_scope(end_point):
with tf.variable_scope('Branch_0'): with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'): with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d( branch_1 = slim.conv2d(
net, depth(192), [1, 1], net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1') scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'): with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d( branch_2 = slim.conv2d(
net, depth(192), [1, 1], net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09), weights_initializer=trunc_normal(0.09),
...@@ -454,7 +457,7 @@ def inception_v2_base(inputs, ...@@ -454,7 +457,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3') scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3') scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'): with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d( branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1], branch_3, depth(128), [1, 1],
...@@ -525,16 +528,18 @@ def inception_v2(inputs, ...@@ -525,16 +528,18 @@ def inception_v2(inputs,
raise ValueError('depth_multiplier is not greater than zero.') raise ValueError('depth_multiplier is not greater than zero.')
# Final pooling and prediction # Final pooling and prediction
with tf.variable_scope(scope, 'InceptionV2', [inputs], reuse=reuse) as scope: with tf.compat.v1.variable_scope(
scope, 'InceptionV2', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout], with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training): is_training=is_training):
net, end_points = inception_v2_base( net, end_points = inception_v2_base(
inputs, scope=scope, min_depth=min_depth, inputs, scope=scope, min_depth=min_depth,
depth_multiplier=depth_multiplier) depth_multiplier=depth_multiplier)
with tf.variable_scope('Logits'): with tf.compat.v1.variable_scope('Logits'):
if global_pool: if global_pool:
# Global average pooling. # Global average pooling.
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool') net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net end_points['global_pool'] = net
else: else:
# Pooling with a fixed kernel size. # Pooling with a fixed kernel size.
......
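The inception_v2.py hunks above are mechanical: v1-only symbols such as tf.variable_scope are re-spelled under tf.compat.v1, and tf.reduce_mean moves from the deprecated positional keep_dims style to named input_tensor/axis/keepdims arguments. A minimal runnable sketch of the same idiom, assuming TF 1.15+ or 2.x where tf.compat.v1 exists; global_pool here is an illustrative helper, not a function from this commit:

import tensorflow as tf

def global_pool(net):
  # Same pattern as the Logits hunk above: the v1 variable_scope is spelled
  # via tf.compat.v1, and keepdims=True keeps the rank-4
  # [batch, 1, 1, channels] shape the surrounding code expects.
  with tf.compat.v1.variable_scope('Logits'):
    return tf.reduce_mean(
        input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')

# Example: a [2, 7, 7, 1024] feature map pools to [2, 1, 1, 1024].
print(global_pool(tf.zeros([2, 7, 7, 1024])).shape)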
@@ -34,7 +34,7 @@ class InceptionV2Test(tf.test.TestCase):
     height, width = 224, 224
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     logits, end_points = inception.inception_v2(inputs, num_classes)
     self.assertTrue(logits.op.name.startswith(
         'InceptionV2/Logits/SpatialSqueeze'))
@@ -49,7 +49,7 @@ class InceptionV2Test(tf.test.TestCase):
     height, width = 224, 224
     num_classes = None
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     net, end_points = inception.inception_v2(inputs, num_classes)
     self.assertTrue(net.op.name.startswith('InceptionV2/Logits/AvgPool'))
     self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
@@ -60,7 +60,7 @@ class InceptionV2Test(tf.test.TestCase):
     batch_size = 5
     height, width = 224, 224
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     mixed_5c, end_points = inception.inception_v2_base(inputs)
     self.assertTrue(mixed_5c.op.name.startswith('InceptionV2/Mixed_5c'))
     self.assertListEqual(mixed_5c.get_shape().as_list(),
@@ -70,7 +70,7 @@ class InceptionV2Test(tf.test.TestCase):
                           'Mixed_5b', 'Mixed_5c', 'Conv2d_1a_7x7',
                           'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
                           'MaxPool_3a_3x3']
-    self.assertItemsEqual(end_points.keys(), expected_endpoints)
+    self.assertItemsEqual(list(end_points.keys()), expected_endpoints)

   def testBuildOnlyUptoFinalEndpoint(self):
     batch_size = 5
@@ -81,18 +81,18 @@ class InceptionV2Test(tf.test.TestCase):
                  'Mixed_5a', 'Mixed_5b', 'Mixed_5c']
     for index, endpoint in enumerate(endpoints):
       with tf.Graph().as_default():
-        inputs = tf.random_uniform((batch_size, height, width, 3))
+        inputs = tf.random.uniform((batch_size, height, width, 3))
         out_tensor, end_points = inception.inception_v2_base(
             inputs, final_endpoint=endpoint)
         self.assertTrue(out_tensor.op.name.startswith(
             'InceptionV2/' + endpoint))
-        self.assertItemsEqual(endpoints[:index+1], end_points.keys())
+        self.assertItemsEqual(endpoints[:index + 1], list(end_points.keys()))

   def testBuildAndCheckAllEndPointsUptoMixed5c(self):
     batch_size = 5
     height, width = 224, 224
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     _, end_points = inception.inception_v2_base(inputs,
                                                 final_endpoint='Mixed_5c')
     endpoints_shapes = {'Mixed_3b': [batch_size, 28, 28, 256],
@@ -110,7 +110,8 @@ class InceptionV2Test(tf.test.TestCase):
                         'Conv2d_2b_1x1': [batch_size, 56, 56, 64],
                         'Conv2d_2c_3x3': [batch_size, 56, 56, 192],
                         'MaxPool_3a_3x3': [batch_size, 28, 28, 192]}
-    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
+    self.assertItemsEqual(
+        list(endpoints_shapes.keys()), list(end_points.keys()))
     for endpoint_name in endpoints_shapes:
       expected_shape = endpoints_shapes[endpoint_name]
       self.assertTrue(endpoint_name in end_points)
@@ -120,7 +121,7 @@ class InceptionV2Test(tf.test.TestCase):
   def testModelHasExpectedNumberOfParameters(self):
     batch_size = 5
     height, width = 224, 224
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     with slim.arg_scope(inception.inception_v2_arg_scope()):
       inception.inception_v2_base(inputs)
     total_params, _ = slim.model_analyzer.analyze_vars(
@@ -132,7 +133,7 @@ class InceptionV2Test(tf.test.TestCase):
     height, width = 224, 224
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     _, end_points = inception.inception_v2(inputs, num_classes)
     endpoint_keys = [key for key in end_points.keys()
@@ -152,7 +153,7 @@ class InceptionV2Test(tf.test.TestCase):
     height, width = 224, 224
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     _, end_points = inception.inception_v2(inputs, num_classes)
     endpoint_keys = [key for key in end_points.keys()
@@ -172,7 +173,7 @@ class InceptionV2Test(tf.test.TestCase):
     height, width = 224, 224
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     with self.assertRaises(ValueError):
       _ = inception.inception_v2(inputs, num_classes, depth_multiplier=-0.1)
     with self.assertRaises(ValueError):
@@ -182,7 +183,7 @@ class InceptionV2Test(tf.test.TestCase):
     batch_size = 5
     height, width = 224, 224
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     _, end_points = inception.inception_v2_base(inputs)
     endpoint_keys = [
@@ -205,7 +206,7 @@ class InceptionV2Test(tf.test.TestCase):
     batch_size = 5
     height, width = 224, 224
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     _, end_points = inception.inception_v2_base(inputs)
     endpoint_keys = [
@@ -213,7 +214,7 @@ class InceptionV2Test(tf.test.TestCase):
         if key.startswith('Mixed') or key.startswith('Conv')
     ]
-    inputs_in_nchw = tf.random_uniform((batch_size, 3, height, width))
+    inputs_in_nchw = tf.random.uniform((batch_size, 3, height, width))
     _, end_points_with_replacement = inception.inception_v2_base(
         inputs_in_nchw, use_separable_conv=False, data_format='NCHW')
@@ -221,7 +222,7 @@ class InceptionV2Test(tf.test.TestCase):
     # shape from the original shape with the 'NHWC' layout.
     for key in endpoint_keys:
       transposed_original_shape = tf.transpose(
-          end_points[key], [0, 3, 1, 2]).get_shape().as_list()
+          a=end_points[key], perm=[0, 3, 1, 2]).get_shape().as_list()
       self.assertTrue(key in end_points_with_replacement)
       new_shape = end_points_with_replacement[key].get_shape().as_list()
       self.assertListEqual(transposed_original_shape, new_shape)
@@ -230,7 +231,7 @@ class InceptionV2Test(tf.test.TestCase):
     batch_size = 5
     height, width = 224, 224
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     # 'NCWH' data format is not supported.
     with self.assertRaises(ValueError):
@@ -245,7 +246,7 @@ class InceptionV2Test(tf.test.TestCase):
     height, width = 112, 112
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     logits, end_points = inception.inception_v2(inputs, num_classes)
     self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
     self.assertListEqual(logits.get_shape().as_list(),
@@ -259,7 +260,7 @@ class InceptionV2Test(tf.test.TestCase):
     height, width = 28, 28
     channels = 192
-    inputs = tf.random_uniform((batch_size, height, width, channels))
+    inputs = tf.random.uniform((batch_size, height, width, channels))
     _, end_points = inception.inception_v2_base(
         inputs, include_root_block=False)
     endpoints_shapes = {
@@ -274,7 +275,8 @@ class InceptionV2Test(tf.test.TestCase):
         'Mixed_5b': [batch_size, 7, 7, 1024],
         'Mixed_5c': [batch_size, 7, 7, 1024]
     }
-    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
+    self.assertItemsEqual(
+        list(endpoints_shapes.keys()), list(end_points.keys()))
     for endpoint_name in endpoints_shapes:
       expected_shape = endpoints_shapes[endpoint_name]
       self.assertTrue(endpoint_name in end_points)
@@ -282,31 +284,33 @@ class InceptionV2Test(tf.test.TestCase):
                            expected_shape)

   def testUnknownImageShape(self):
-    tf.reset_default_graph()
+    tf.compat.v1.reset_default_graph()
     batch_size = 2
     height, width = 224, 224
     num_classes = 1000
     input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
     with self.test_session() as sess:
-      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
+      inputs = tf.compat.v1.placeholder(
+          tf.float32, shape=(batch_size, None, None, 3))
       logits, end_points = inception.inception_v2(inputs, num_classes)
       self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
       self.assertListEqual(logits.get_shape().as_list(),
                            [batch_size, num_classes])
       pre_pool = end_points['Mixed_5c']
       feed_dict = {inputs: input_np}
-      tf.global_variables_initializer().run()
+      tf.compat.v1.global_variables_initializer().run()
       pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
       self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])

   def testGlobalPoolUnknownImageShape(self):
-    tf.reset_default_graph()
+    tf.compat.v1.reset_default_graph()
     batch_size = 1
     height, width = 250, 300
     num_classes = 1000
     input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
     with self.test_session() as sess:
-      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
+      inputs = tf.compat.v1.placeholder(
+          tf.float32, shape=(batch_size, None, None, 3))
       logits, end_points = inception.inception_v2(inputs, num_classes,
                                                   global_pool=True)
       self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
@@ -314,7 +318,7 @@ class InceptionV2Test(tf.test.TestCase):
                            [batch_size, num_classes])
       pre_pool = end_points['Mixed_5c']
       feed_dict = {inputs: input_np}
-      tf.global_variables_initializer().run()
+      tf.compat.v1.global_variables_initializer().run()
       pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
       self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
@@ -323,15 +327,15 @@ class InceptionV2Test(tf.test.TestCase):
     height, width = 224, 224
     num_classes = 1000
-    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
+    inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
     logits, _ = inception.inception_v2(inputs, num_classes)
     self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
     self.assertListEqual(logits.get_shape().as_list(),
                          [None, num_classes])
-    images = tf.random_uniform((batch_size, height, width, 3))
+    images = tf.random.uniform((batch_size, height, width, 3))
     with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
       output = sess.run(logits, {inputs: images.eval()})
       self.assertEquals(output.shape, (batch_size, num_classes))
@@ -340,13 +344,13 @@ class InceptionV2Test(tf.test.TestCase):
     height, width = 224, 224
     num_classes = 1000
-    eval_inputs = tf.random_uniform((batch_size, height, width, 3))
+    eval_inputs = tf.random.uniform((batch_size, height, width, 3))
     logits, _ = inception.inception_v2(eval_inputs, num_classes,
                                        is_training=False)
-    predictions = tf.argmax(logits, 1)
+    predictions = tf.argmax(input=logits, axis=1)
     with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
       output = sess.run(predictions)
       self.assertEquals(output.shape, (batch_size,))
@@ -356,50 +360,51 @@ class InceptionV2Test(tf.test.TestCase):
     height, width = 150, 150
     num_classes = 1000
-    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
+    train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
     inception.inception_v2(train_inputs, num_classes)
-    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
+    eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
     logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)
-    predictions = tf.argmax(logits, 1)
+    predictions = tf.argmax(input=logits, axis=1)
     with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
       output = sess.run(predictions)
       self.assertEquals(output.shape, (eval_batch_size,))

   def testLogitsNotSqueezed(self):
     num_classes = 25
-    images = tf.random_uniform([1, 224, 224, 3])
+    images = tf.random.uniform([1, 224, 224, 3])
     logits, _ = inception.inception_v2(images,
                                        num_classes=num_classes,
                                        spatial_squeeze=False)
     with self.test_session() as sess:
-      tf.global_variables_initializer().run()
+      tf.compat.v1.global_variables_initializer().run()
       logits_out = sess.run(logits)
       self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])

   def testNoBatchNormScaleByDefault(self):
     height, width = 224, 224
     num_classes = 1000
-    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
+    inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
     with slim.arg_scope(inception.inception_v2_arg_scope()):
       inception.inception_v2(inputs, num_classes, is_training=False)
-    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
+    self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])

   def testBatchNormScale(self):
     height, width = 224, 224
     num_classes = 1000
-    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
+    inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
     with slim.arg_scope(
         inception.inception_v2_arg_scope(batch_norm_scale=True)):
       inception.inception_v2(inputs, num_classes, is_training=False)
     gamma_names = set(
-        v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
+        v.op.name
+        for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))
     self.assertGreater(len(gamma_names), 0)
-    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
+    for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):
      self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
...
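Two py2->py3 details drive most of the test churn above. First, dict.keys() returns a view object in Python 3, so arguments that previously received lists are wrapped in list(...) before reaching assertItemsEqual. Second, tf.random_uniform and tf.global_variables_initializer are v1 spellings, replaced by tf.random.uniform and tf.compat.v1.global_variables_initializer. A small self-contained sketch of both points; the end_points dict below is a stand-in, not a real network's endpoints:

import tensorflow as tf

end_points = {'Mixed_5b': None, 'Mixed_5c': None}  # stand-in endpoint dict
keys = end_points.keys()            # a dict_keys view under Python 3
assert list(keys) == ['Mixed_5b', 'Mixed_5c']  # list() restores list semantics

# TF2-compatible spelling for random test inputs.
inputs = tf.random.uniform((5, 224, 224, 3))
assert inputs.shape == (5, 224, 224, 3)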
@@ -24,7 +24,10 @@ from tensorflow.contrib import slim as contrib_slim
 from nets import inception_utils

 slim = contrib_slim
-trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
+# pylint: disable=g-long-lambda
+trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
+    0.0, stddev)

 def inception_v3_base(inputs,
@@ -97,7 +100,7 @@ def inception_v3_base(inputs,
     raise ValueError('depth_multiplier is not greater than zero.')
   depth = lambda d: max(int(d * depth_multiplier), min_depth)
-  with tf.variable_scope(scope, 'InceptionV3', [inputs]):
+  with tf.compat.v1.variable_scope(scope, 'InceptionV3', [inputs]):
     with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                         stride=1, padding='VALID'):
       # 299 x 299 x 3
@@ -142,20 +145,20 @@ def inception_v3_base(inputs,
                         stride=1, padding='SAME'):
       # mixed: 35 x 35 x 256.
       end_point = 'Mixed_5b'
-      with tf.variable_scope(end_point):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope(end_point):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
           branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                  scope='Conv2d_0b_5x5')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
           branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
           branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                  scope='Conv2d_0b_3x3')
           branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                  scope='Conv2d_0c_3x3')
-        with tf.variable_scope('Branch_3'):
+        with tf.compat.v1.variable_scope('Branch_3'):
           branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
           branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
                                  scope='Conv2d_0b_1x1')
@@ -165,21 +168,21 @@ def inception_v3_base(inputs,
       # mixed_1: 35 x 35 x 288.
       end_point = 'Mixed_5c'
-      with tf.variable_scope(end_point):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope(end_point):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
           branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                  scope='Conv_1_0c_5x5')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
           branch_2 = slim.conv2d(net, depth(64), [1, 1],
                                  scope='Conv2d_0a_1x1')
           branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                  scope='Conv2d_0b_3x3')
           branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                  scope='Conv2d_0c_3x3')
-        with tf.variable_scope('Branch_3'):
+        with tf.compat.v1.variable_scope('Branch_3'):
           branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
           branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                  scope='Conv2d_0b_1x1')
@@ -189,20 +192,20 @@ def inception_v3_base(inputs,
       # mixed_2: 35 x 35 x 288.
       end_point = 'Mixed_5d'
-      with tf.variable_scope(end_point):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope(end_point):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
           branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                  scope='Conv2d_0b_5x5')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
           branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
           branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                  scope='Conv2d_0b_3x3')
           branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                  scope='Conv2d_0c_3x3')
-        with tf.variable_scope('Branch_3'):
+        with tf.compat.v1.variable_scope('Branch_3'):
           branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
           branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                  scope='Conv2d_0b_1x1')
@@ -212,17 +215,17 @@ def inception_v3_base(inputs,
       # mixed_3: 17 x 17 x 768.
       end_point = 'Mixed_6a'
-      with tf.variable_scope(end_point):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope(end_point):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,
                                  padding='VALID', scope='Conv2d_1a_1x1')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
           branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
                                  scope='Conv2d_0b_3x3')
           branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,
                                  padding='VALID', scope='Conv2d_1a_1x1')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
           branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                      scope='MaxPool_1a_3x3')
         net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
@@ -231,16 +234,16 @@ def inception_v3_base(inputs,
       # mixed4: 17 x 17 x 768.
       end_point = 'Mixed_6b'
-      with tf.variable_scope(end_point):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope(end_point):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
           branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],
                                  scope='Conv2d_0b_1x7')
           branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                  scope='Conv2d_0c_7x1')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
           branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
           branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
                                  scope='Conv2d_0b_7x1')
@@ -250,7 +253,7 @@ def inception_v3_base(inputs,
                                  scope='Conv2d_0d_7x1')
           branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                  scope='Conv2d_0e_1x7')
-        with tf.variable_scope('Branch_3'):
+        with tf.compat.v1.variable_scope('Branch_3'):
           branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
           branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                  scope='Conv2d_0b_1x1')
@@ -260,16 +263,16 @@ def inception_v3_base(inputs,
       # mixed_5: 17 x 17 x 768.
       end_point = 'Mixed_6c'
-      with tf.variable_scope(end_point):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope(end_point):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
           branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                  scope='Conv2d_0b_1x7')
           branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                  scope='Conv2d_0c_7x1')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
           branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
           branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                  scope='Conv2d_0b_7x1')
@@ -279,7 +282,7 @@ def inception_v3_base(inputs,
                                  scope='Conv2d_0d_7x1')
           branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                  scope='Conv2d_0e_1x7')
-        with tf.variable_scope('Branch_3'):
+        with tf.compat.v1.variable_scope('Branch_3'):
           branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
           branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                  scope='Conv2d_0b_1x1')
@@ -288,16 +291,16 @@ def inception_v3_base(inputs,
       if end_point == final_endpoint: return net, end_points
       # mixed_6: 17 x 17 x 768.
       end_point = 'Mixed_6d'
-      with tf.variable_scope(end_point):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope(end_point):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
           branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                  scope='Conv2d_0b_1x7')
           branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                  scope='Conv2d_0c_7x1')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
           branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
           branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                  scope='Conv2d_0b_7x1')
@@ -307,7 +310,7 @@ def inception_v3_base(inputs,
                                  scope='Conv2d_0d_7x1')
           branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                  scope='Conv2d_0e_1x7')
-        with tf.variable_scope('Branch_3'):
+        with tf.compat.v1.variable_scope('Branch_3'):
           branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
           branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                  scope='Conv2d_0b_1x1')
@@ -317,16 +320,16 @@ def inception_v3_base(inputs,
       # mixed_7: 17 x 17 x 768.
       end_point = 'Mixed_6e'
-      with tf.variable_scope(end_point):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope(end_point):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
           branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                  scope='Conv2d_0b_1x7')
           branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                  scope='Conv2d_0c_7x1')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
           branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
           branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
                                  scope='Conv2d_0b_7x1')
@@ -336,7 +339,7 @@ def inception_v3_base(inputs,
                                  scope='Conv2d_0d_7x1')
           branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                  scope='Conv2d_0e_1x7')
-        with tf.variable_scope('Branch_3'):
+        with tf.compat.v1.variable_scope('Branch_3'):
           branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
           branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                  scope='Conv2d_0b_1x1')
@@ -346,12 +349,12 @@ def inception_v3_base(inputs,
       # mixed_8: 8 x 8 x 1280.
       end_point = 'Mixed_7a'
-      with tf.variable_scope(end_point):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope(end_point):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
           branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,
                                  padding='VALID', scope='Conv2d_1a_3x3')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
           branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                  scope='Conv2d_0b_1x7')
@@ -359,7 +362,7 @@ def inception_v3_base(inputs,
                                  scope='Conv2d_0c_7x1')
           branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,
                                  padding='VALID', scope='Conv2d_1a_3x3')
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
           branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                      scope='MaxPool_1a_3x3')
         net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
@@ -367,22 +370,22 @@ def inception_v3_base(inputs,
       if end_point == final_endpoint: return net, end_points
       # mixed_9: 8 x 8 x 2048.
       end_point = 'Mixed_7b'
-      with tf.variable_scope(end_point):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope(end_point):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
           branch_1 = tf.concat(axis=3, values=[
               slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
               slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
           branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
           branch_2 = slim.conv2d(
               branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
           branch_2 = tf.concat(axis=3, values=[
               slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
               slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
-        with tf.variable_scope('Branch_3'):
+        with tf.compat.v1.variable_scope('Branch_3'):
           branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
           branch_3 = slim.conv2d(
               branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
@@ -392,22 +395,22 @@ def inception_v3_base(inputs,
       # mixed_10: 8 x 8 x 2048.
       end_point = 'Mixed_7c'
-      with tf.variable_scope(end_point):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope(end_point):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
           branch_1 = tf.concat(axis=3, values=[
               slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
               slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
-        with tf.variable_scope('Branch_2'):
+        with tf.compat.v1.variable_scope('Branch_2'):
           branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
           branch_2 = slim.conv2d(
               branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
           branch_2 = tf.concat(axis=3, values=[
               slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
               slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
-        with tf.variable_scope('Branch_3'):
+        with tf.compat.v1.variable_scope('Branch_3'):
           branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
           branch_3 = slim.conv2d(
               branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
@@ -483,7 +486,8 @@ def inception_v3(inputs,
     raise ValueError('depth_multiplier is not greater than zero.')
   depth = lambda d: max(int(d * depth_multiplier), min_depth)
-  with tf.variable_scope(scope, 'InceptionV3', [inputs], reuse=reuse) as scope:
+  with tf.compat.v1.variable_scope(
+      scope, 'InceptionV3', [inputs], reuse=reuse) as scope:
     with slim.arg_scope([slim.batch_norm, slim.dropout],
                         is_training=is_training):
       net, end_points = inception_v3_base(
@@ -495,7 +499,7 @@ def inception_v3(inputs,
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='SAME'):
          aux_logits = end_points['Mixed_6e']
-         with tf.variable_scope('AuxLogits'):
+         with tf.compat.v1.variable_scope('AuxLogits'):
            aux_logits = slim.avg_pool2d(
                aux_logits, [5, 5], stride=3, padding='VALID',
                scope='AvgPool_1a_5x5')
@@ -518,10 +522,11 @@ def inception_v3(inputs,
          end_points['AuxLogits'] = aux_logits

      # Final pooling and prediction
-     with tf.variable_scope('Logits'):
+     with tf.compat.v1.variable_scope('Logits'):
        if global_pool:
          # Global average pooling.
-         net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='GlobalPool')
+         net = tf.reduce_mean(
+             input_tensor=net, axis=[1, 2], keepdims=True, name='GlobalPool')
          end_points['global_pool'] = net
        else:
          # Pooling with a fixed kernel size.
...
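The inception_v3.py changes repeat the inception_v2.py pattern, plus one initializer rename at the top of the file: tf.truncated_normal_initializer becomes tf.compat.v1.truncated_normal_initializer inside the trunc_normal lambda. A hedged sketch of what that lambda produces; the conv shape below is illustrative, not taken from the commit:

import tensorflow as tf

# Mirrors the renamed lambda; the pylint disable in the diff exists only
# because the spelled-out compat name no longer fits on one line.
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
    0.0, stddev)

init = trunc_normal(0.09)
# v1 initializer objects are callable; this draws truncated-normal weights
# for a hypothetical 1x1 conv mapping 64 channels to 96.
weights = init(shape=[1, 1, 64, 96])
print(weights.shape)  # (1, 1, 64, 96)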