Commit d4eedbb9 authored by Mark Sandler, committed by Hongkun Yu

Merged commit includes the following changes: (#8077)



Internal cleanup (py2->py3) plus the following changes:

285513318  by Sergio Guadarrama:

    Adds a script for post-training quantization
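    (The quantization script itself is not included in this view. As a rough sketch of what post-training quantization of one of these slim models typically looks like with the TF Lite converter — the SavedModel path and output filename below are placeholders, not part of this commit:)

        # Minimal post-training (dynamic-range) quantization sketch.
        import tensorflow as tf

        converter = tf.lite.TFLiteConverter.from_saved_model('./exported_saved_model')
        converter.optimizations = [tf.lite.Optimize.DEFAULT]  # weight-only quantization
        tflite_model = converter.convert()

        with open('model_quant.tflite', 'wb') as f:
          f.write(tflite_model)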

284222305  by Sergio Guadarrama:

    Modified squeeze-excite operation to accommodate tensors of undefined (NoneType) H/W.
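    (The corresponding change appears in the squeeze_excite diff further below. A minimal sketch of the idea, assuming NHWC inputs; the helper name is illustrative only:)

        import tensorflow.compat.v1 as tf

        def spatial_squeeze_pool(x):
          # When H/W are statically known, a fixed-window avg_pool works;
          # when they are None, reduce_mean pools over the runtime H/W.
          h, w = x.shape.as_list()[1:3]
          if h is None or w is None:
            return tf.reduce_mean(x, axis=[1, 2], keepdims=True)
          return tf.nn.avg_pool(x, ksize=(1, h, w, 1),
                                strides=(1, 1, 1, 1), padding='VALID')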

282028343  by Sergio Guadarrama:

    Add MobilenetV3 and MobilenetEdgeTPU to the slim/nets_factory.
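    (A hedged usage sketch of the factory; the registered names for the new models — e.g. 'mobilenet_v3_large' — are assumptions based on this description, and the snippet assumes TF1-style graph mode as used by slim:)

        import tensorflow.compat.v1 as tf
        from nets import nets_factory

        with tf.Graph().as_default():
          network_fn = nets_factory.get_network_fn(
              'mobilenet_v3_large', num_classes=1001, is_training=False)
          size = network_fn.default_image_size
          images = tf.placeholder(tf.float32, [None, size, size, 3])
          logits, end_points = network_fn(images)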

PiperOrigin-RevId: 289455329
Co-authored-by: Sergio Guadarrama <sguada@gmail.com>
parent 0e0a94a6
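Most of the diff below is the mechanical part of that cleanup: TF1-only symbols are replaced with their TF2-compatible endpoints (tf.random_uniform -> tf.random.uniform, tf.variable_scope -> tf.compat.v1.variable_scope, tf.placeholder -> tf.compat.v1.placeholder, keep_dims -> keepdims, and so on). A representative before/after, not taken verbatim from any single file (shapes and scope names are arbitrary):

    import tensorflow as tf

    graph = tf.Graph()
    with graph.as_default():
      # Before: inputs = tf.random_uniform((8, 224, 224, 3))
      inputs = tf.random.uniform((8, 224, 224, 3))
      # Before: with tf.variable_scope('Logits'): ... keep_dims=True
      with tf.compat.v1.variable_scope('Logits'):
        pooled = tf.reduce_mean(inputs, axis=[1, 2], keepdims=True)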
@@ -34,7 +34,7 @@ class InceptionV3Test(tf.test.TestCase):
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     logits, end_points = inception.inception_v3(inputs, num_classes)
     self.assertTrue(logits.op.name.startswith(
         'InceptionV3/Logits/SpatialSqueeze'))
@@ -49,7 +49,7 @@ class InceptionV3Test(tf.test.TestCase):
     height, width = 299, 299
     num_classes = None
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     net, end_points = inception.inception_v3(inputs, num_classes)
     self.assertTrue(net.op.name.startswith('InceptionV3/Logits/AvgPool'))
     self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 2048])
@@ -60,7 +60,7 @@ class InceptionV3Test(tf.test.TestCase):
     batch_size = 5
     height, width = 299, 299
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     final_endpoint, end_points = inception.inception_v3_base(inputs)
     self.assertTrue(final_endpoint.op.name.startswith(
         'InceptionV3/Mixed_7c'))
@@ -84,7 +84,7 @@ class InceptionV3Test(tf.test.TestCase):
     for index, endpoint in enumerate(endpoints):
       with tf.Graph().as_default():
-        inputs = tf.random_uniform((batch_size, height, width, 3))
+        inputs = tf.random.uniform((batch_size, height, width, 3))
         out_tensor, end_points = inception.inception_v3_base(
             inputs, final_endpoint=endpoint)
         self.assertTrue(out_tensor.op.name.startswith(
@@ -95,7 +95,7 @@ class InceptionV3Test(tf.test.TestCase):
     batch_size = 5
     height, width = 299, 299
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     _, end_points = inception.inception_v3_base(
         inputs, final_endpoint='Mixed_7c')
     endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
@@ -126,7 +126,7 @@ class InceptionV3Test(tf.test.TestCase):
   def testModelHasExpectedNumberOfParameters(self):
     batch_size = 5
     height, width = 299, 299
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     with slim.arg_scope(inception.inception_v3_arg_scope()):
       inception.inception_v3_base(inputs)
     total_params, _ = slim.model_analyzer.analyze_vars(
@@ -138,7 +138,7 @@ class InceptionV3Test(tf.test.TestCase):
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     _, end_points = inception.inception_v3(inputs, num_classes)
     self.assertTrue('Logits' in end_points)
     logits = end_points['Logits']
@@ -162,7 +162,7 @@ class InceptionV3Test(tf.test.TestCase):
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     _, end_points = inception.inception_v3(inputs, num_classes)
     endpoint_keys = [key for key in end_points.keys()
@@ -182,7 +182,7 @@ class InceptionV3Test(tf.test.TestCase):
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     _, end_points = inception.inception_v3(inputs, num_classes)
     endpoint_keys = [key for key in end_points.keys()
@@ -202,7 +202,7 @@ class InceptionV3Test(tf.test.TestCase):
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     with self.assertRaises(ValueError):
       _ = inception.inception_v3(inputs, num_classes, depth_multiplier=-0.1)
     with self.assertRaises(ValueError):
@@ -213,7 +213,7 @@ class InceptionV3Test(tf.test.TestCase):
     height, width = 150, 150
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     logits, end_points = inception.inception_v3(inputs, num_classes)
     self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
     self.assertListEqual(logits.get_shape().as_list(),
@@ -223,37 +223,39 @@ class InceptionV3Test(tf.test.TestCase):
                          [batch_size, 3, 3, 2048])

   def testUnknownImageShape(self):
-    tf.reset_default_graph()
+    tf.compat.v1.reset_default_graph()
     batch_size = 2
     height, width = 299, 299
     num_classes = 1000
     input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
     with self.test_session() as sess:
-      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
+      inputs = tf.compat.v1.placeholder(
+          tf.float32, shape=(batch_size, None, None, 3))
       logits, end_points = inception.inception_v3(inputs, num_classes)
       self.assertListEqual(logits.get_shape().as_list(),
                            [batch_size, num_classes])
       pre_pool = end_points['Mixed_7c']
       feed_dict = {inputs: input_np}
-      tf.global_variables_initializer().run()
+      tf.compat.v1.global_variables_initializer().run()
       pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
       self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])

   def testGlobalPoolUnknownImageShape(self):
-    tf.reset_default_graph()
+    tf.compat.v1.reset_default_graph()
     batch_size = 1
     height, width = 330, 400
     num_classes = 1000
     input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
     with self.test_session() as sess:
-      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
+      inputs = tf.compat.v1.placeholder(
+          tf.float32, shape=(batch_size, None, None, 3))
       logits, end_points = inception.inception_v3(inputs, num_classes,
                                                   global_pool=True)
       self.assertListEqual(logits.get_shape().as_list(),
                            [batch_size, num_classes])
       pre_pool = end_points['Mixed_7c']
       feed_dict = {inputs: input_np}
-      tf.global_variables_initializer().run()
+      tf.compat.v1.global_variables_initializer().run()
       pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
       self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 11, 2048])
@@ -262,15 +264,15 @@ class InceptionV3Test(tf.test.TestCase):
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
+    inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
     logits, _ = inception.inception_v3(inputs, num_classes)
     self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
     self.assertListEqual(logits.get_shape().as_list(),
                          [None, num_classes])
-    images = tf.random_uniform((batch_size, height, width, 3))
+    images = tf.random.uniform((batch_size, height, width, 3))
     with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
       output = sess.run(logits, {inputs: images.eval()})
       self.assertEquals(output.shape, (batch_size, num_classes))
@@ -279,13 +281,13 @@ class InceptionV3Test(tf.test.TestCase):
     height, width = 299, 299
     num_classes = 1000
-    eval_inputs = tf.random_uniform((batch_size, height, width, 3))
+    eval_inputs = tf.random.uniform((batch_size, height, width, 3))
     logits, _ = inception.inception_v3(eval_inputs, num_classes,
                                        is_training=False)
-    predictions = tf.argmax(logits, 1)
+    predictions = tf.argmax(input=logits, axis=1)
     with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
       output = sess.run(predictions)
       self.assertEquals(output.shape, (batch_size,))
@@ -295,51 +297,52 @@ class InceptionV3Test(tf.test.TestCase):
     height, width = 150, 150
     num_classes = 1000
-    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
+    train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
     inception.inception_v3(train_inputs, num_classes)
-    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
+    eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
     logits, _ = inception.inception_v3(eval_inputs, num_classes,
                                        is_training=False, reuse=True)
-    predictions = tf.argmax(logits, 1)
+    predictions = tf.argmax(input=logits, axis=1)
     with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
       output = sess.run(predictions)
       self.assertEquals(output.shape, (eval_batch_size,))

   def testLogitsNotSqueezed(self):
     num_classes = 25
-    images = tf.random_uniform([1, 299, 299, 3])
+    images = tf.random.uniform([1, 299, 299, 3])
     logits, _ = inception.inception_v3(images,
                                        num_classes=num_classes,
                                        spatial_squeeze=False)
     with self.test_session() as sess:
-      tf.global_variables_initializer().run()
+      tf.compat.v1.global_variables_initializer().run()
       logits_out = sess.run(logits)
       self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])

   def testNoBatchNormScaleByDefault(self):
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
+    inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
     with slim.arg_scope(inception.inception_v3_arg_scope()):
       inception.inception_v3(inputs, num_classes, is_training=False)
-    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
+    self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])

   def testBatchNormScale(self):
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
+    inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
     with slim.arg_scope(
         inception.inception_v3_arg_scope(batch_norm_scale=True)):
       inception.inception_v3(inputs, num_classes, is_training=False)
     gamma_names = set(
-        v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
+        v.op.name
+        for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))
     self.assertGreater(len(gamma_names), 0)
-    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
+    for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):
       self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
...
@@ -37,17 +37,18 @@ def block_inception_a(inputs, scope=None, reuse=None):
   # By default use stride=1 and SAME padding
   with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                       stride=1, padding='SAME'):
-    with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):
-      with tf.variable_scope('Branch_0'):
+    with tf.compat.v1.variable_scope(
+        scope, 'BlockInceptionA', [inputs], reuse=reuse):
+      with tf.compat.v1.variable_scope('Branch_0'):
         branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
-      with tf.variable_scope('Branch_1'):
+      with tf.compat.v1.variable_scope('Branch_1'):
         branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
         branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
-      with tf.variable_scope('Branch_2'):
+      with tf.compat.v1.variable_scope('Branch_2'):
         branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
         branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
         branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
-      with tf.variable_scope('Branch_3'):
+      with tf.compat.v1.variable_scope('Branch_3'):
         branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
         branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')
       return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
@@ -58,16 +59,17 @@ def block_reduction_a(inputs, scope=None, reuse=None):
   # By default use stride=1 and SAME padding
   with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                       stride=1, padding='SAME'):
-    with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
-      with tf.variable_scope('Branch_0'):
+    with tf.compat.v1.variable_scope(
+        scope, 'BlockReductionA', [inputs], reuse=reuse):
+      with tf.compat.v1.variable_scope('Branch_0'):
         branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',
                                scope='Conv2d_1a_3x3')
-      with tf.variable_scope('Branch_1'):
+      with tf.compat.v1.variable_scope('Branch_1'):
         branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
         branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
         branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,
                                padding='VALID', scope='Conv2d_1a_3x3')
-      with tf.variable_scope('Branch_2'):
+      with tf.compat.v1.variable_scope('Branch_2'):
         branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
                                    scope='MaxPool_1a_3x3')
       return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
@@ -78,20 +80,21 @@ def block_inception_b(inputs, scope=None, reuse=None):
   # By default use stride=1 and SAME padding
   with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                       stride=1, padding='SAME'):
-    with tf.variable_scope(scope, 'BlockInceptionB', [inputs], reuse=reuse):
-      with tf.variable_scope('Branch_0'):
+    with tf.compat.v1.variable_scope(
+        scope, 'BlockInceptionB', [inputs], reuse=reuse):
+      with tf.compat.v1.variable_scope('Branch_0'):
         branch_0 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
-      with tf.variable_scope('Branch_1'):
+      with tf.compat.v1.variable_scope('Branch_1'):
         branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
         branch_1 = slim.conv2d(branch_1, 224, [1, 7], scope='Conv2d_0b_1x7')
         branch_1 = slim.conv2d(branch_1, 256, [7, 1], scope='Conv2d_0c_7x1')
-      with tf.variable_scope('Branch_2'):
+      with tf.compat.v1.variable_scope('Branch_2'):
         branch_2 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
         branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')
         branch_2 = slim.conv2d(branch_2, 224, [1, 7], scope='Conv2d_0c_1x7')
         branch_2 = slim.conv2d(branch_2, 224, [7, 1], scope='Conv2d_0d_7x1')
         branch_2 = slim.conv2d(branch_2, 256, [1, 7], scope='Conv2d_0e_1x7')
-      with tf.variable_scope('Branch_3'):
+      with tf.compat.v1.variable_scope('Branch_3'):
         branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
         branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
       return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
@@ -102,18 +105,19 @@ def block_reduction_b(inputs, scope=None, reuse=None):
   # By default use stride=1 and SAME padding
   with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                       stride=1, padding='SAME'):
-    with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
-      with tf.variable_scope('Branch_0'):
+    with tf.compat.v1.variable_scope(
+        scope, 'BlockReductionB', [inputs], reuse=reuse):
+      with tf.compat.v1.variable_scope('Branch_0'):
         branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
         branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,
                                padding='VALID', scope='Conv2d_1a_3x3')
-      with tf.variable_scope('Branch_1'):
+      with tf.compat.v1.variable_scope('Branch_1'):
         branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
         branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
         branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
         branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,
                                padding='VALID', scope='Conv2d_1a_3x3')
-      with tf.variable_scope('Branch_2'):
+      with tf.compat.v1.variable_scope('Branch_2'):
         branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
                                    scope='MaxPool_1a_3x3')
       return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
@@ -124,22 +128,23 @@ def block_inception_c(inputs, scope=None, reuse=None):
   # By default use stride=1 and SAME padding
   with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                       stride=1, padding='SAME'):
-    with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
-      with tf.variable_scope('Branch_0'):
+    with tf.compat.v1.variable_scope(
+        scope, 'BlockInceptionC', [inputs], reuse=reuse):
+      with tf.compat.v1.variable_scope('Branch_0'):
         branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
-      with tf.variable_scope('Branch_1'):
+      with tf.compat.v1.variable_scope('Branch_1'):
         branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
         branch_1 = tf.concat(axis=3, values=[
             slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
             slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')])
-      with tf.variable_scope('Branch_2'):
+      with tf.compat.v1.variable_scope('Branch_2'):
         branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
         branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
         branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
         branch_2 = tf.concat(axis=3, values=[
             slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
             slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')])
-      with tf.variable_scope('Branch_3'):
+      with tf.compat.v1.variable_scope('Branch_3'):
         branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
         branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
       return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
@@ -171,7 +176,7 @@ def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
     end_points[name] = net
     return name == final_endpoint

-  with tf.variable_scope(scope, 'InceptionV4', [inputs]):
+  with tf.compat.v1.variable_scope(scope, 'InceptionV4', [inputs]):
     with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                         stride=1, padding='SAME'):
       # 299 x 299 x 3
@@ -186,23 +191,23 @@ def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
       net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
       if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
       # 147 x 147 x 64
-      with tf.variable_scope('Mixed_3a'):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope('Mixed_3a'):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                      scope='MaxPool_0a_3x3')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',
                                  scope='Conv2d_0a_3x3')
         net = tf.concat(axis=3, values=[branch_0, branch_1])
         if add_and_check_final('Mixed_3a', net): return net, end_points
       # 73 x 73 x 160
-      with tf.variable_scope('Mixed_4a'):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope('Mixed_4a'):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
           branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID',
                                  scope='Conv2d_1a_3x3')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
           branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')
           branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
@@ -212,11 +217,11 @@ def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
         if add_and_check_final('Mixed_4a', net): return net, end_points
       # 71 x 71 x 192
-      with tf.variable_scope('Mixed_5a'):
-        with tf.variable_scope('Branch_0'):
+      with tf.compat.v1.variable_scope('Mixed_5a'):
+        with tf.compat.v1.variable_scope('Branch_0'):
           branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID',
                                  scope='Conv2d_1a_3x3')
-        with tf.variable_scope('Branch_1'):
+        with tf.compat.v1.variable_scope('Branch_1'):
           branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                      scope='MaxPool_1a_3x3')
         net = tf.concat(axis=3, values=[branch_0, branch_1])
@@ -281,7 +286,8 @@ def inception_v4(inputs, num_classes=1001, is_training=True,
     end_points: the set of end_points from the inception model.
   """
   end_points = {}
-  with tf.variable_scope(scope, 'InceptionV4', [inputs], reuse=reuse) as scope:
+  with tf.compat.v1.variable_scope(
+      scope, 'InceptionV4', [inputs], reuse=reuse) as scope:
     with slim.arg_scope([slim.batch_norm, slim.dropout],
                         is_training=is_training):
       net, end_points = inception_v4_base(inputs, scope=scope)
@@ -290,7 +296,7 @@ def inception_v4(inputs, num_classes=1001, is_training=True,
                           stride=1, padding='SAME'):
         # Auxiliary Head logits
         if create_aux_logits and num_classes:
-          with tf.variable_scope('AuxLogits'):
+          with tf.compat.v1.variable_scope('AuxLogits'):
             # 17 x 17 x 1024
             aux_logits = end_points['Mixed_6h']
             aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3,
@@ -310,15 +316,18 @@ def inception_v4(inputs, num_classes=1001, is_training=True,
         # Final pooling and prediction
         # TODO(sguada,arnoegw): Consider adding a parameter global_pool which
         # can be set to False to disable pooling here (as in resnet_*()).
-        with tf.variable_scope('Logits'):
+        with tf.compat.v1.variable_scope('Logits'):
           # 8 x 8 x 1536
           kernel_size = net.get_shape()[1:3]
           if kernel_size.is_fully_defined():
            net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                  scope='AvgPool_1a')
          else:
-            net = tf.reduce_mean(net, [1, 2], keep_dims=True,
-                                 name='global_pool')
+            net = tf.reduce_mean(
+                input_tensor=net,
+                axis=[1, 2],
+                keepdims=True,
+                name='global_pool')
           end_points['global_pool'] = net
           if not num_classes:
             return net, end_points
...
@@ -29,7 +29,7 @@ class InceptionTest(tf.test.TestCase):
     batch_size = 5
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     logits, end_points = inception.inception_v4(inputs, num_classes)
     auxlogits = end_points['AuxLogits']
     predictions = end_points['Predictions']
@@ -48,7 +48,7 @@ class InceptionTest(tf.test.TestCase):
     batch_size = 5
     height, width = 299, 299
     num_classes = None
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     net, end_points = inception.inception_v4(inputs, num_classes)
     self.assertTrue(net.op.name.startswith('InceptionV4/Logits/AvgPool'))
     self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1536])
@@ -59,7 +59,7 @@ class InceptionTest(tf.test.TestCase):
     batch_size = 5
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     logits, endpoints = inception.inception_v4(inputs, num_classes,
                                                create_aux_logits=False)
     self.assertFalse('AuxLogits' in endpoints)
@@ -71,7 +71,7 @@ class InceptionTest(tf.test.TestCase):
     batch_size = 5
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     _, end_points = inception.inception_v4(inputs, num_classes)
     endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
                         'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
@@ -116,7 +116,7 @@ class InceptionTest(tf.test.TestCase):
   def testBuildBaseNetwork(self):
     batch_size = 5
     height, width = 299, 299
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     net, end_points = inception.inception_v4_base(inputs)
     self.assertTrue(net.op.name.startswith(
         'InceptionV4/Mixed_7d'))
@@ -142,7 +142,7 @@ class InceptionTest(tf.test.TestCase):
                      'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
     for index, endpoint in enumerate(all_endpoints):
       with tf.Graph().as_default():
-        inputs = tf.random_uniform((batch_size, height, width, 3))
+        inputs = tf.random.uniform((batch_size, height, width, 3))
         out_tensor, end_points = inception.inception_v4_base(
             inputs, final_endpoint=endpoint)
         self.assertTrue(out_tensor.op.name.startswith(
@@ -153,22 +153,24 @@ class InceptionTest(tf.test.TestCase):
     batch_size = 5
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     # Force all Variables to reside on the device.
-    with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
+    with tf.compat.v1.variable_scope('on_cpu'), tf.device('/cpu:0'):
       inception.inception_v4(inputs, num_classes)
-    with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
+    with tf.compat.v1.variable_scope('on_gpu'), tf.device('/gpu:0'):
       inception.inception_v4(inputs, num_classes)
-    for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
+    for v in tf.compat.v1.get_collection(
+        tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
       self.assertDeviceEqual(v.device, '/cpu:0')
-    for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
+    for v in tf.compat.v1.get_collection(
+        tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
       self.assertDeviceEqual(v.device, '/gpu:0')

   def testHalfSizeImages(self):
     batch_size = 5
     height, width = 150, 150
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     logits, end_points = inception.inception_v4(inputs, num_classes)
     self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
     self.assertListEqual(logits.get_shape().as_list(),
@@ -181,7 +183,7 @@ class InceptionTest(tf.test.TestCase):
     batch_size = 1
     height, width = 350, 400
     num_classes = 1000
-    inputs = tf.random_uniform((batch_size, height, width, 3))
+    inputs = tf.random.uniform((batch_size, height, width, 3))
     logits, end_points = inception.inception_v4(inputs, num_classes)
     self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
     self.assertListEqual(logits.get_shape().as_list(),
@@ -195,15 +197,15 @@ class InceptionTest(tf.test.TestCase):
     height, width = 350, 400
     num_classes = 1000
     with self.test_session() as sess:
-      inputs = tf.placeholder(tf.float32, (batch_size, None, None, 3))
+      inputs = tf.compat.v1.placeholder(tf.float32, (batch_size, None, None, 3))
       logits, end_points = inception.inception_v4(
           inputs, num_classes, create_aux_logits=False)
       self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
       self.assertListEqual(logits.get_shape().as_list(),
                            [batch_size, num_classes])
       pre_pool = end_points['Mixed_7d']
-      images = tf.random_uniform((batch_size, height, width, 3))
-      sess.run(tf.global_variables_initializer())
+      images = tf.random.uniform((batch_size, height, width, 3))
+      sess.run(tf.compat.v1.global_variables_initializer())
       logits_out, pre_pool_out = sess.run([logits, pre_pool],
                                           {inputs: images.eval()})
       self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))
@@ -214,13 +216,13 @@ class InceptionTest(tf.test.TestCase):
     height, width = 299, 299
     num_classes = 1000
     with self.test_session() as sess:
-      inputs = tf.placeholder(tf.float32, (None, height, width, 3))
+      inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
       logits, _ = inception.inception_v4(inputs, num_classes)
       self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
       self.assertListEqual(logits.get_shape().as_list(),
                            [None, num_classes])
-      images = tf.random_uniform((batch_size, height, width, 3))
-      sess.run(tf.global_variables_initializer())
+      images = tf.random.uniform((batch_size, height, width, 3))
+      sess.run(tf.compat.v1.global_variables_initializer())
       output = sess.run(logits, {inputs: images.eval()})
       self.assertEquals(output.shape, (batch_size, num_classes))
@@ -229,12 +231,12 @@ class InceptionTest(tf.test.TestCase):
     height, width = 299, 299
     num_classes = 1000
     with self.test_session() as sess:
-      eval_inputs = tf.random_uniform((batch_size, height, width, 3))
+      eval_inputs = tf.random.uniform((batch_size, height, width, 3))
       logits, _ = inception.inception_v4(eval_inputs,
                                          num_classes,
                                          is_training=False)
-      predictions = tf.argmax(logits, 1)
-      sess.run(tf.global_variables_initializer())
+      predictions = tf.argmax(input=logits, axis=1)
+      sess.run(tf.compat.v1.global_variables_initializer())
       output = sess.run(predictions)
       self.assertEquals(output.shape, (batch_size,))
@@ -244,39 +246,40 @@ class InceptionTest(tf.test.TestCase):
     height, width = 150, 150
     num_classes = 1000
     with self.test_session() as sess:
-      train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
+      train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
       inception.inception_v4(train_inputs, num_classes)
-      eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
+      eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
       logits, _ = inception.inception_v4(eval_inputs,
                                          num_classes,
                                          is_training=False,
                                          reuse=True)
-      predictions = tf.argmax(logits, 1)
-      sess.run(tf.global_variables_initializer())
+      predictions = tf.argmax(input=logits, axis=1)
+      sess.run(tf.compat.v1.global_variables_initializer())
       output = sess.run(predictions)
       self.assertEquals(output.shape, (eval_batch_size,))

   def testNoBatchNormScaleByDefault(self):
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
+    inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
     with contrib_slim.arg_scope(inception.inception_v4_arg_scope()):
       inception.inception_v4(inputs, num_classes, is_training=False)
-    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
+    self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])

   def testBatchNormScale(self):
     height, width = 299, 299
     num_classes = 1000
-    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
+    inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
     with contrib_slim.arg_scope(
         inception.inception_v4_arg_scope(batch_norm_scale=True)):
       inception.inception_v4(inputs, num_classes, is_training=False)
     gamma_names = set(
-        v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
+        v.op.name
+        for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))
     self.assertGreater(len(gamma_names), 0)
-    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
+    for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):
       self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
...
@@ -59,7 +59,7 @@ def lenet(images, num_classes=10, is_training=False,
   """
   end_points = {}

-  with tf.variable_scope(scope, 'LeNet', [images]):
+  with tf.compat.v1.variable_scope(scope, 'LeNet', [images]):
     net = end_points['conv1'] = slim.conv2d(images, 32, [5, 5], scope='conv1')
     net = end_points['pool1'] = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
     net = end_points['conv2'] = slim.conv2d(net, 64, [5, 5], scope='conv2')
@@ -93,6 +93,6 @@ def lenet_arg_scope(weight_decay=0.0):
   with slim.arg_scope(
       [slim.conv2d, slim.fully_connected],
       weights_regularizer=slim.l2_regularizer(weight_decay),
-      weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
+      weights_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.1),
       activation_fn=tf.nn.relu) as sc:
     return sc
@@ -78,9 +78,10 @@ def _split_divisible(num, num_ways, divisible_by=8):
 @contextlib.contextmanager
 def _v1_compatible_scope_naming(scope):
+  """v1 compatible scope naming."""
   if scope is None:  # Create uniqified separable blocks.
-    with tf.variable_scope(None, default_name='separable') as s, \
-         tf.name_scope(s.original_name_scope):
+    with tf.compat.v1.variable_scope(None, default_name='separable') as s, \
+         tf.compat.v1.name_scope(s.original_name_scope):
       yield ''
   else:
     # We use scope_depthwise, scope_pointwise for compatibility with V1 ckpts.
@@ -299,8 +300,8 @@ def expanded_conv(input_tensor,
   if depthwise_activation_fn is not None:
     dw_defaults['activation_fn'] = depthwise_activation_fn
   # pylint: disable=g-backslash-continuation
-  with tf.variable_scope(scope, default_name='expanded_conv') as s, \
-       tf.name_scope(s.original_name_scope), \
+  with tf.compat.v1.variable_scope(scope, default_name='expanded_conv') as s, \
+       tf.compat.v1.name_scope(s.original_name_scope), \
        slim.arg_scope((slim.conv2d,), **conv_defaults), \
        slim.arg_scope((slim.separable_conv2d,), **dw_defaults):
     prev_depth = input_tensor.get_shape().as_list()[3]
@@ -413,6 +414,11 @@ def squeeze_excite(input_tensor,
                    pool=None):
   """Squeeze excite block for Mobilenet V3.

+  If the squeeze_input_tensor - or the input_tensor if squeeze_input_tensor is
+  None - contains variable dimensions (Nonetype in tensor shape), perform
+  average pooling (as the first step in the squeeze operation) by calling
+  reduce_mean across the H/W of the input tensor.
+
   Args:
     input_tensor: input tensor to apply SE block to.
     divisible_by: ensures all inner dimensions are divisible by this number.
@@ -428,7 +434,7 @@ def squeeze_excite(input_tensor,
   Returns:
     Gated input_tensor. (e.g. X * SE(X))
   """
-  with tf.variable_scope('squeeze_excite'):
+  with tf.compat.v1.variable_scope('squeeze_excite'):
     if squeeze_input_tensor is None:
       squeeze_input_tensor = input_tensor
     input_size = input_tensor.shape.as_list()[1:3]
@@ -441,10 +447,13 @@ def squeeze_excite(input_tensor,
     squeeze_channels = _make_divisible(
         input_channels / squeeze_factor, divisor=divisible_by)
-    pooled = tf.nn.avg_pool(squeeze_input_tensor,
-                            (1, pool_height, pool_width, 1),
-                            strides=(1, stride, stride, 1),
-                            padding='VALID')
+    if pool is None:
+      pooled = tf.reduce_mean(squeeze_input_tensor, axis=[1, 2], keepdims=True)
+    else:
+      pooled = tf.nn.avg_pool(
+          squeeze_input_tensor, (1, pool_height, pool_width, 1),
+          strides=(1, stride, stride, 1),
+          padding='VALID')
     squeeze = slim.conv2d(
         pooled,
         kernel_size=(1, 1),
...
...@@ -54,8 +54,10 @@ def _fixed_padding(inputs, kernel_size, rate=1): ...@@ -54,8 +54,10 @@ def _fixed_padding(inputs, kernel_size, rate=1):
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1] pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2] pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]] pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]], padded_inputs = tf.pad(
[pad_beg[1], pad_end[1]], [0, 0]]) tensor=inputs,
paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],
[0, 0]])
return padded_inputs return padded_inputs
...@@ -304,8 +306,8 @@ def mobilenet_base( # pylint: disable=invalid-name ...@@ -304,8 +306,8 @@ def mobilenet_base( # pylint: disable=invalid-name
@contextlib.contextmanager @contextlib.contextmanager
def _scope_all(scope, default_scope=None): def _scope_all(scope, default_scope=None):
with tf.variable_scope(scope, default_name=default_scope) as s,\ with tf.compat.v1.variable_scope(scope, default_name=default_scope) as s,\
tf.name_scope(s.original_name_scope): tf.compat.v1.name_scope(s.original_name_scope):
yield s yield s
...@@ -361,7 +363,7 @@ def mobilenet(inputs, ...@@ -361,7 +363,7 @@ def mobilenet(inputs,
if len(input_shape) != 4: if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape)) raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope: with tf.compat.v1.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
inputs = tf.identity(inputs, 'input') inputs = tf.identity(inputs, 'input')
net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args) net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
if base_only: if base_only:
...@@ -369,7 +371,7 @@ def mobilenet(inputs, ...@@ -369,7 +371,7 @@ def mobilenet(inputs,
net = tf.identity(net, name='embedding') net = tf.identity(net, name='embedding')
with tf.variable_scope('Logits'): with tf.compat.v1.variable_scope('Logits'):
net = global_pool(net) net = global_pool(net)
end_points['global_pool'] = net end_points['global_pool'] = net
if not num_classes: if not num_classes:
...@@ -382,7 +384,7 @@ def mobilenet(inputs, ...@@ -382,7 +384,7 @@ def mobilenet(inputs,
num_classes, [1, 1], num_classes, [1, 1],
activation_fn=None, activation_fn=None,
normalizer_fn=None, normalizer_fn=None,
biases_initializer=tf.zeros_initializer(), biases_initializer=tf.compat.v1.zeros_initializer(),
scope='Conv2d_1c_1x1') scope='Conv2d_1c_1x1')
logits = tf.squeeze(logits, [1, 2]) logits = tf.squeeze(logits, [1, 2])
...@@ -394,7 +396,7 @@ def mobilenet(inputs, ...@@ -394,7 +396,7 @@ def mobilenet(inputs,
return logits, end_points return logits, end_points
def global_pool(input_tensor, pool_op=tf.nn.avg_pool): def global_pool(input_tensor, pool_op=tf.compat.v2.nn.avg_pool2d):
"""Applies avg pool to produce 1x1 output. """Applies avg pool to produce 1x1 output.
NOTE: This function is funcitonally equivalenet to reduce_mean, but it has NOTE: This function is funcitonally equivalenet to reduce_mean, but it has
...@@ -408,9 +410,11 @@ def global_pool(input_tensor, pool_op=tf.nn.avg_pool): ...@@ -408,9 +410,11 @@ def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
""" """
shape = input_tensor.get_shape().as_list() shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None: if shape[1] is None or shape[2] is None:
kernel_size = tf.convert_to_tensor( kernel_size = tf.convert_to_tensor(value=[
[1, tf.shape(input_tensor)[1], 1,
tf.shape(input_tensor)[2], 1]) tf.shape(input=input_tensor)[1],
tf.shape(input=input_tensor)[2], 1
])
else: else:
kernel_size = [1, shape[1], shape[2], 1] kernel_size = [1, shape[1], shape[2], 1]
output = pool_op( output = pool_op(
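A hedged usage sketch of the static-shape branch above: for a fully known NHWC tensor the result matches tf.reduce_mean over the spatial axes, as the docstring notes (shapes below are illustrative).

x = tf.random.uniform((2, 7, 7, 32))
pooled = global_pool(x)                               # shape (2, 1, 1, 32)
mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)  # same values, same shape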
...@@ -458,7 +462,8 @@ def training_scope(is_training=True, ...@@ -458,7 +462,8 @@ def training_scope(is_training=True,
if stddev < 0: if stddev < 0:
weight_intitializer = slim.initializers.xavier_initializer() weight_intitializer = slim.initializers.xavier_initializer()
else: else:
weight_intitializer = tf.truncated_normal_initializer(stddev=stddev) weight_intitializer = tf.compat.v1.truncated_normal_initializer(
stddev=stddev)
# Set weight_decay for weights in Conv and FC layers. # Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope( with slim.arg_scope(
......
...@@ -18,6 +18,7 @@ from __future__ import absolute_import ...@@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division from __future__ import division
from __future__ import print_function from __future__ import print_function
import copy import copy
from six.moves import range
import tensorflow as tf import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim from tensorflow.contrib import slim as contrib_slim
from nets.mobilenet import conv_blocks as ops from nets.mobilenet import conv_blocks as ops
...@@ -36,19 +37,20 @@ def find_ops(optype): ...@@ -36,19 +37,20 @@ def find_ops(optype):
Returns: Returns:
List of operations. List of operations.
""" """
gd = tf.get_default_graph() gd = tf.compat.v1.get_default_graph()
return [var for var in gd.get_operations() if var.type == optype] return [var for var in gd.get_operations() if var.type == optype]
class MobilenetV2Test(tf.test.TestCase): class MobilenetV2Test(tf.test.TestCase):
def setUp(self): def setUp(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
def testCreation(self): def testCreation(self):
spec = dict(mobilenet_v2.V2_DEF) spec = dict(mobilenet_v2.V2_DEF)
_, ep = mobilenet.mobilenet( _, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec) tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=spec)
num_convs = len(find_ops('Conv2D')) num_convs = len(find_ops('Conv2D'))
# This is mostly a sanity test. No deep reason for these particular # This is mostly a sanity test. No deep reason for these particular
...@@ -64,16 +66,17 @@ class MobilenetV2Test(tf.test.TestCase): ...@@ -64,16 +66,17 @@ class MobilenetV2Test(tf.test.TestCase):
def testCreationNoClasses(self): def testCreationNoClasses(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF) spec = copy.deepcopy(mobilenet_v2.V2_DEF)
net, ep = mobilenet.mobilenet( net, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec, tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=spec,
num_classes=None) num_classes=None)
self.assertIs(net, ep['global_pool']) self.assertIs(net, ep['global_pool'])
def testImageSizes(self): def testImageSizes(self):
for input_size, output_size in [(224, 7), (192, 6), (160, 5), for input_size, output_size in [(224, 7), (192, 6), (160, 5),
(128, 4), (96, 3)]: (128, 4), (96, 3)]:
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
_, ep = mobilenet_v2.mobilenet( _, ep = mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, input_size, input_size, 3))) tf.compat.v1.placeholder(tf.float32, (10, input_size, input_size, 3)))
self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3], self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3],
[output_size] * 2) [output_size] * 2)
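The expected spatial sizes in the test above follow from MobilenetV2's overall output stride of 32: the final feature map is roughly input_size / 32 on each side. A plain-arithmetic check:

import math
for input_size in (224, 192, 160, 128, 96):
  print(input_size, math.ceil(input_size / 32))  # 7, 6, 5, 4, 3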
...@@ -84,7 +87,8 @@ class MobilenetV2Test(tf.test.TestCase): ...@@ -84,7 +87,8 @@ class MobilenetV2Test(tf.test.TestCase):
(ops.expanded_conv,): dict(split_expansion=2), (ops.expanded_conv,): dict(split_expansion=2),
} }
_, _ = mobilenet.mobilenet( _, _ = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec) tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=spec)
num_convs = len(find_ops('Conv2D')) num_convs = len(find_ops('Conv2D'))
# All but 3 ops have 3 conv operators, the remaining 3 have one # All but 3 ops have 3 conv operators, the remaining 3 have one
# and there is one unaccounted. # and there is one unaccounted.
...@@ -92,16 +96,16 @@ class MobilenetV2Test(tf.test.TestCase): ...@@ -92,16 +96,16 @@ class MobilenetV2Test(tf.test.TestCase):
def testWithOutputStride8(self): def testWithOutputStride8(self):
out, _ = mobilenet.mobilenet_base( out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)), tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF, conv_defs=mobilenet_v2.V2_DEF,
output_stride=8, output_stride=8,
scope='MobilenetV2') scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testDivisibleBy(self): def testDivisibleBy(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
mobilenet_v2.mobilenet( mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF, conv_defs=mobilenet_v2.V2_DEF,
divisible_by=16, divisible_by=16,
min_depth=32) min_depth=32)
...@@ -111,25 +115,27 @@ class MobilenetV2Test(tf.test.TestCase): ...@@ -111,25 +115,27 @@ class MobilenetV2Test(tf.test.TestCase):
1001], s) 1001], s)
def testDivisibleByWithArgScope(self): def testDivisibleByWithArgScope(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works # Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided. # if no default min_depth is provided.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
mobilenet_v2.mobilenet( mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)), tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) conv_defs=mobilenet_v2.V2_DEF,
depth_multiplier=0.1)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s) s = set(s)
self.assertSameElements(s, [32, 192, 128, 1001]) self.assertSameElements(s, [32, 192, 128, 1001])
def testFineGrained(self): def testFineGrained(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works # Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided. # if no default min_depth is provided.
mobilenet_v2.mobilenet( mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)), tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01, conv_defs=mobilenet_v2.V2_DEF,
depth_multiplier=0.01,
finegrain_classification_mode=True) finegrain_classification_mode=True)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s) s = set(s)
...@@ -137,18 +143,19 @@ class MobilenetV2Test(tf.test.TestCase): ...@@ -137,18 +143,19 @@ class MobilenetV2Test(tf.test.TestCase):
self.assertSameElements(s, [8, 48, 1001, 1280]) self.assertSameElements(s, [8, 48, 1001, 1280])
def testMobilenetBase(self): def testMobilenetBase(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
# Verifies that mobilenet_base returns pre-pooling layer. # Verifies that mobilenet_base returns pre-pooling layer.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
net, _ = mobilenet_v2.mobilenet_base( net, _ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)), tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) conv_defs=mobilenet_v2.V2_DEF,
depth_multiplier=0.1)
self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128]) self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128])
def testWithOutputStride16(self): def testWithOutputStride16(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
out, _ = mobilenet.mobilenet_base( out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)), tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF, conv_defs=mobilenet_v2.V2_DEF,
output_stride=16) output_stride=16)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
...@@ -167,17 +174,18 @@ class MobilenetV2Test(tf.test.TestCase): ...@@ -167,17 +174,18 @@ class MobilenetV2Test(tf.test.TestCase):
multiplier_func=inverse_multiplier, multiplier_func=inverse_multiplier,
num_outputs=16) num_outputs=16)
_ = mobilenet_v2.mobilenet_base( _ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)), tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=new_def, depth_multiplier=0.1) conv_defs=new_def,
depth_multiplier=0.1)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
# Expect first layer to be 160 (16 / 0.1), and other layers # Expect first layer to be 160 (16 / 0.1), and other layers
# their max(original size * 0.1, 8) # their max(original size * 0.1, 8)
self.assertEqual([160, 8, 48, 8, 48], s[:5]) self.assertEqual([160, 8, 48, 8, 48], s[:5])
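A plain-arithmetic sketch of the expectation in the comment above (the channel counts 32 and 480 are illustrative, not the exact V2_DEF values): the first layer divides its channel count by the multiplier, while the other layers multiply and are floored at an assumed min depth of 8.

multiplier, min_depth = 0.1, 8
first = int(16 / multiplier)                                        # 160
others = [max(int(c * multiplier), min_depth) for c in (32, 480)]   # [8, 48]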
def testWithOutputStride8AndExplicitPadding(self): def testWithOutputStride8AndExplicitPadding(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
out, _ = mobilenet.mobilenet_base( out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)), tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF, conv_defs=mobilenet_v2.V2_DEF,
output_stride=8, output_stride=8,
use_explicit_padding=True, use_explicit_padding=True,
...@@ -185,9 +193,9 @@ class MobilenetV2Test(tf.test.TestCase): ...@@ -185,9 +193,9 @@ class MobilenetV2Test(tf.test.TestCase):
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testWithOutputStride16AndExplicitPadding(self): def testWithOutputStride16AndExplicitPadding(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
out, _ = mobilenet.mobilenet_base( out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)), tf.compat.v1.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF, conv_defs=mobilenet_v2.V2_DEF,
output_stride=16, output_stride=16,
use_explicit_padding=True) use_explicit_padding=True)
......
...@@ -45,7 +45,7 @@ _se4 = lambda expansion_tensor, input_tensor: squeeze_excite(expansion_tensor) ...@@ -45,7 +45,7 @@ _se4 = lambda expansion_tensor, input_tensor: squeeze_excite(expansion_tensor)
def hard_swish(x): def hard_swish(x):
with tf.name_scope('hard_swish'): with tf.compat.v1.name_scope('hard_swish'):
return x * tf.nn.relu6(x + np.float32(3)) * np.float32(1. / 6.) return x * tf.nn.relu6(x + np.float32(3)) * np.float32(1. / 6.)
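A quick NumPy check of the definition above, purely illustrative: hard_swish(x) = x * relu6(x + 3) / 6, so it is 0 for x <= -3, equals x for x >= 3, and ramps smoothly in between.

import numpy as np
def hard_swish_np(x):
  return x * np.clip(x + 3.0, 0.0, 6.0) / 6.0
print(hard_swish_np(np.array([-4.0, 0.0, 1.0, 3.0])))  # [0.  0.  0.667  3.]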
...@@ -358,6 +358,12 @@ small = wrapped_partial(mobilenet, conv_defs=V3_SMALL) ...@@ -358,6 +358,12 @@ small = wrapped_partial(mobilenet, conv_defs=V3_SMALL)
edge_tpu = wrapped_partial(mobilenet, edge_tpu = wrapped_partial(mobilenet,
new_defaults={'scope': 'MobilenetEdgeTPU'}, new_defaults={'scope': 'MobilenetEdgeTPU'},
conv_defs=V3_EDGETPU) conv_defs=V3_EDGETPU)
edge_tpu_075 = wrapped_partial(
mobilenet,
new_defaults={'scope': 'MobilenetEdgeTPU'},
conv_defs=V3_EDGETPU,
depth_multiplier=0.75,
finegrain_classification_mode=True)
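A hypothetical usage of the new preset above, mirroring the existing edge_tpu tests (the input shape is illustrative and the placeholder implies TF1 graph mode): depth_multiplier=0.75 shrinks the intermediate channel counts, while finegrain_classification_mode keeps the final feature layer at its full width.

logits, endpoints = mobilenet_v3.edge_tpu_075(
    tf.compat.v1.placeholder(tf.float32, (1, 224, 224, 3)))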
# Minimalistic model that does not have Squeeze Excite blocks, # Minimalistic model that does not have Squeeze Excite blocks,
# Hardswish, or 5x5 depthwise convolution. # Hardswish, or 5x5 depthwise convolution.
......
...@@ -28,23 +28,23 @@ class MobilenetV3Test(absltest.TestCase): ...@@ -28,23 +28,23 @@ class MobilenetV3Test(absltest.TestCase):
def setUp(self): def setUp(self):
super(MobilenetV3Test, self).setUp() super(MobilenetV3Test, self).setUp()
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
def testMobilenetV3Large(self): def testMobilenetV3Large(self):
logits, endpoints = mobilenet_v3.mobilenet( logits, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3))) tf.compat.v1.placeholder(tf.float32, (1, 224, 224, 3)))
self.assertEqual(endpoints['layer_19'].shape, [1, 1, 1, 1280]) self.assertEqual(endpoints['layer_19'].shape, [1, 1, 1, 1280])
self.assertEqual(logits.shape, [1, 1001]) self.assertEqual(logits.shape, [1, 1001])
def testMobilenetV3Small(self): def testMobilenetV3Small(self):
_, endpoints = mobilenet_v3.mobilenet( _, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)), tf.compat.v1.placeholder(tf.float32, (1, 224, 224, 3)),
conv_defs=mobilenet_v3.V3_SMALL) conv_defs=mobilenet_v3.V3_SMALL)
self.assertEqual(endpoints['layer_15'].shape, [1, 1, 1, 1024]) self.assertEqual(endpoints['layer_15'].shape, [1, 1, 1, 1024])
def testMobilenetEdgeTpu(self): def testMobilenetEdgeTpu(self):
_, endpoints = mobilenet_v3.edge_tpu( _, endpoints = mobilenet_v3.edge_tpu(
tf.placeholder(tf.float32, (1, 224, 224, 3))) tf.compat.v1.placeholder(tf.float32, (1, 224, 224, 3)))
self.assertIn('Inference mode is created by default', self.assertIn('Inference mode is created by default',
mobilenet_v3.edge_tpu.__doc__) mobilenet_v3.edge_tpu.__doc__)
self.assertEqual(endpoints['layer_24'].shape, [1, 7, 7, 1280]) self.assertEqual(endpoints['layer_24'].shape, [1, 7, 7, 1280])
...@@ -53,13 +53,13 @@ class MobilenetV3Test(absltest.TestCase): ...@@ -53,13 +53,13 @@ class MobilenetV3Test(absltest.TestCase):
def testMobilenetEdgeTpuChangeScope(self): def testMobilenetEdgeTpuChangeScope(self):
_, endpoints = mobilenet_v3.edge_tpu( _, endpoints = mobilenet_v3.edge_tpu(
tf.placeholder(tf.float32, (1, 224, 224, 3)), scope='Scope') tf.compat.v1.placeholder(tf.float32, (1, 224, 224, 3)), scope='Scope')
self.assertStartsWith( self.assertStartsWith(
endpoints['layer_24'].name, 'Scope') endpoints['layer_24'].name, 'Scope')
def testMobilenetV3BaseOnly(self): def testMobilenetV3BaseOnly(self):
result, endpoints = mobilenet_v3.mobilenet( result, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)), tf.compat.v1.placeholder(tf.float32, (1, 224, 224, 3)),
conv_defs=mobilenet_v3.V3_LARGE, conv_defs=mobilenet_v3.V3_LARGE,
base_only=True, base_only=True,
final_endpoint='layer_17') final_endpoint='layer_17')
...@@ -67,6 +67,16 @@ class MobilenetV3Test(absltest.TestCase): ...@@ -67,6 +67,16 @@ class MobilenetV3Test(absltest.TestCase):
self.assertEqual(endpoints['layer_17'].shape, [1, 7, 7, 960]) self.assertEqual(endpoints['layer_17'].shape, [1, 7, 7, 960])
self.assertEqual(result, endpoints['layer_17']) self.assertEqual(result, endpoints['layer_17'])
def testMobilenetV3BaseOnly_VariableInput(self):
result, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (None, None, None, 3)),
conv_defs=mobilenet_v3.V3_LARGE,
base_only=True,
final_endpoint='layer_17')
# Get the last layer before average pool.
self.assertEqual(endpoints['layer_17'].shape.as_list(),
[None, None, None, 960])
self.assertEqual(result, endpoints['layer_17'])
if __name__ == '__main__': if __name__ == '__main__':
absltest.main() absltest.main()
# MobilenetV2 and above # MobilenetV2 and above
For MobilenetV2+ see this file [mobilenet/README.md](mobilenet/README_md) For MobilenetV2+ see this file [mobilenet/README.md](mobilenet/README.md)
# MobileNetV1 # MobileNetV1
......
...@@ -162,8 +162,10 @@ def _fixed_padding(inputs, kernel_size, rate=1): ...@@ -162,8 +162,10 @@ def _fixed_padding(inputs, kernel_size, rate=1):
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1] pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2] pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]] pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]], padded_inputs = tf.pad(
[pad_beg[1], pad_end[1]], [0, 0]]) tensor=inputs,
paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],
[0, 0]])
return padded_inputs return padded_inputs
...@@ -231,7 +233,7 @@ def mobilenet_v1_base(inputs, ...@@ -231,7 +233,7 @@ def mobilenet_v1_base(inputs,
padding = 'SAME' padding = 'SAME'
if use_explicit_padding: if use_explicit_padding:
padding = 'VALID' padding = 'VALID'
with tf.variable_scope(scope, 'MobilenetV1', [inputs]): with tf.compat.v1.variable_scope(scope, 'MobilenetV1', [inputs]):
with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding): with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding):
# The current_stride variable keeps track of the output stride of the # The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the # activations, i.e., the running product of convolution strides up to the
...@@ -357,17 +359,19 @@ def mobilenet_v1(inputs, ...@@ -357,17 +359,19 @@ def mobilenet_v1(inputs,
raise ValueError('Invalid input tensor rank, expected 4, was: %d' % raise ValueError('Invalid input tensor rank, expected 4, was: %d' %
len(input_shape)) len(input_shape))
with tf.variable_scope(scope, 'MobilenetV1', [inputs], reuse=reuse) as scope: with tf.compat.v1.variable_scope(
scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout], with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training): is_training=is_training):
net, end_points = mobilenet_v1_base(inputs, scope=scope, net, end_points = mobilenet_v1_base(inputs, scope=scope,
min_depth=min_depth, min_depth=min_depth,
depth_multiplier=depth_multiplier, depth_multiplier=depth_multiplier,
conv_defs=conv_defs) conv_defs=conv_defs)
with tf.variable_scope('Logits'): with tf.compat.v1.variable_scope('Logits'):
if global_pool: if global_pool:
# Global average pooling. # Global average pooling.
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool') net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net end_points['global_pool'] = net
else: else:
# Pooling with a fixed kernel size. # Pooling with a fixed kernel size.
...@@ -431,7 +435,7 @@ def mobilenet_v1_arg_scope( ...@@ -431,7 +435,7 @@ def mobilenet_v1_arg_scope(
regularize_depthwise=False, regularize_depthwise=False,
batch_norm_decay=0.9997, batch_norm_decay=0.9997,
batch_norm_epsilon=0.001, batch_norm_epsilon=0.001,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS, batch_norm_updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS,
normalizer_fn=slim.batch_norm): normalizer_fn=slim.batch_norm):
"""Defines the default MobilenetV1 arg scope. """Defines the default MobilenetV1 arg scope.
...@@ -462,7 +466,7 @@ def mobilenet_v1_arg_scope( ...@@ -462,7 +466,7 @@ def mobilenet_v1_arg_scope(
batch_norm_params['is_training'] = is_training batch_norm_params['is_training'] = is_training
# Set weight_decay for weights in Conv and DepthSepConv layers. # Set weight_decay for weights in Conv and DepthSepConv layers.
weights_init = tf.truncated_normal_initializer(stddev=stddev) weights_init = tf.compat.v1.truncated_normal_initializer(stddev=stddev)
regularizer = contrib_layers.l2_regularizer(weight_decay) regularizer = contrib_layers.l2_regularizer(weight_decay)
if regularize_depthwise: if regularize_depthwise:
depthwise_regularizer = regularizer depthwise_regularizer = regularizer
......
...@@ -29,7 +29,7 @@ from preprocessing import preprocessing_factory ...@@ -29,7 +29,7 @@ from preprocessing import preprocessing_factory
slim = contrib_slim slim = contrib_slim
flags = tf.app.flags flags = tf.compat.v1.app.flags
flags.DEFINE_string('master', '', 'Session master') flags.DEFINE_string('master', '', 'Session master')
flags.DEFINE_integer('batch_size', 250, 'Batch size') flags.DEFINE_integer('batch_size', 250, 'Batch size')
...@@ -74,7 +74,7 @@ def imagenet_input(is_training): ...@@ -74,7 +74,7 @@ def imagenet_input(is_training):
image = image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size) image = image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size)
images, labels = tf.train.batch( images, labels = tf.compat.v1.train.batch(
tensors=[image, label], tensors=[image, label],
batch_size=FLAGS.batch_size, batch_size=FLAGS.batch_size,
num_threads=4, num_threads=4,
...@@ -94,8 +94,11 @@ def metrics(logits, labels): ...@@ -94,8 +94,11 @@ def metrics(logits, labels):
""" """
labels = tf.squeeze(labels) labels = tf.squeeze(labels)
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'Accuracy': tf.metrics.accuracy(tf.argmax(logits, 1), labels), 'Accuracy':
'Recall_5': tf.metrics.recall_at_k(labels, logits, 5), tf.compat.v1.metrics.accuracy(
tf.argmax(input=logits, axis=1), labels),
'Recall_5':
tf.compat.v1.metrics.recall_at_k(labels, logits, 5),
}) })
for name, value in names_to_values.iteritems(): for name, value in names_to_values.iteritems():
slim.summaries.add_scalar_summary( slim.summaries.add_scalar_summary(
...@@ -151,4 +154,4 @@ def main(unused_arg): ...@@ -151,4 +154,4 @@ def main(unused_arg):
if __name__ == '__main__': if __name__ == '__main__':
tf.app.run(main) tf.compat.v1.app.run(main)
...@@ -34,7 +34,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -34,7 +34,7 @@ class MobilenetV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith( self.assertTrue(logits.op.name.startswith(
'MobilenetV1/Logits/SpatialSqueeze')) 'MobilenetV1/Logits/SpatialSqueeze'))
...@@ -49,7 +49,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -49,7 +49,7 @@ class MobilenetV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = None num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) net, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('MobilenetV1/Logits/AvgPool')) self.assertTrue(net.op.name.startswith('MobilenetV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024]) self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
...@@ -60,7 +60,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -60,7 +60,7 @@ class MobilenetV1Test(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(inputs) net, end_points = mobilenet_v1.mobilenet_v1_base(inputs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13')) self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13'))
self.assertListEqual(net.get_shape().as_list(), self.assertListEqual(net.get_shape().as_list(),
...@@ -100,7 +100,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -100,7 +100,7 @@ class MobilenetV1Test(tf.test.TestCase):
'Conv2d_13_depthwise', 'Conv2d_13_pointwise'] 'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
for index, endpoint in enumerate(endpoints): for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default(): with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
out_tensor, end_points = mobilenet_v1.mobilenet_v1_base( out_tensor, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint=endpoint) inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith( self.assertTrue(out_tensor.op.name.startswith(
...@@ -117,7 +117,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -117,7 +117,7 @@ class MobilenetV1Test(tf.test.TestCase):
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512) mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512)
] ]
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base( net, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs) inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3')) self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3'))
...@@ -133,7 +133,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -133,7 +133,7 @@ class MobilenetV1Test(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d], with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm): normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base( _, end_points = mobilenet_v1.mobilenet_v1_base(
...@@ -186,7 +186,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -186,7 +186,7 @@ class MobilenetV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
output_stride = 16 output_stride = 16
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d], with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm): normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base( _, end_points = mobilenet_v1.mobilenet_v1_base(
...@@ -240,7 +240,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -240,7 +240,7 @@ class MobilenetV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
output_stride = 8 output_stride = 8
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d], with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm): normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base( _, end_points = mobilenet_v1.mobilenet_v1_base(
...@@ -293,7 +293,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -293,7 +293,7 @@ class MobilenetV1Test(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 128, 128 height, width = 128, 128
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d], with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm): normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base( _, end_points = mobilenet_v1.mobilenet_v1_base(
...@@ -345,7 +345,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -345,7 +345,7 @@ class MobilenetV1Test(tf.test.TestCase):
def testModelHasExpectedNumberOfParameters(self): def testModelHasExpectedNumberOfParameters(self):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d], with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm): normalizer_fn=slim.batch_norm):
mobilenet_v1.mobilenet_v1_base(inputs) mobilenet_v1.mobilenet_v1_base(inputs)
...@@ -358,7 +358,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -358,7 +358,7 @@ class MobilenetV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) _, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys() if key.startswith('Conv')] endpoint_keys = [key for key in end_points.keys() if key.startswith('Conv')]
...@@ -377,7 +377,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -377,7 +377,7 @@ class MobilenetV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) _, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys() endpoint_keys = [key for key in end_points.keys()
...@@ -397,7 +397,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -397,7 +397,7 @@ class MobilenetV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
_ = mobilenet_v1.mobilenet_v1( _ = mobilenet_v1.mobilenet_v1(
inputs, num_classes, depth_multiplier=-0.1) inputs, num_classes, depth_multiplier=-0.1)
...@@ -410,7 +410,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -410,7 +410,7 @@ class MobilenetV1Test(tf.test.TestCase):
height, width = 112, 112 height, width = 112, 112
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits')) self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(), self.assertListEqual(logits.get_shape().as_list(),
...@@ -420,31 +420,33 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -420,31 +420,33 @@ class MobilenetV1Test(tf.test.TestCase):
[batch_size, 4, 4, 1024]) [batch_size, 4, 4, 1024])
def testUnknownImageShape(self): def testUnknownImageShape(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
batch_size = 2 batch_size = 2
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3)) input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess: with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3)) inputs = tf.compat.v1.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits')) self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(), self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes]) [batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise'] pre_pool = end_points['Conv2d_13_pointwise']
feed_dict = {inputs: input_np} feed_dict = {inputs: input_np}
tf.global_variables_initializer().run() tf.compat.v1.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024]) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self): def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
batch_size = 1 batch_size = 1
height, width = 250, 300 height, width = 250, 300
num_classes = 1000 num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3)) input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess: with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3)) inputs = tf.compat.v1.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes, logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes,
global_pool=True) global_pool=True)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits')) self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
...@@ -452,7 +454,7 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -452,7 +454,7 @@ class MobilenetV1Test(tf.test.TestCase):
[batch_size, num_classes]) [batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise'] pre_pool = end_points['Conv2d_13_pointwise']
feed_dict = {inputs: input_np} feed_dict = {inputs: input_np}
tf.global_variables_initializer().run() tf.compat.v1.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024]) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
...@@ -461,15 +463,15 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -461,15 +463,15 @@ class MobilenetV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3)) inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes) logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits')) self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(), self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes]) [None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3)) images = tf.random.uniform((batch_size, height, width, 3))
with self.test_session() as sess: with self.test_session() as sess:
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()}) output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes)) self.assertEquals(output.shape, (batch_size, num_classes))
...@@ -478,13 +480,13 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -478,13 +480,13 @@ class MobilenetV1Test(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3)) eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes, logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
is_training=False) is_training=False)
predictions = tf.argmax(logits, 1) predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess: with self.test_session() as sess:
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions) output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,)) self.assertEquals(output.shape, (batch_size,))
...@@ -494,27 +496,27 @@ class MobilenetV1Test(tf.test.TestCase): ...@@ -494,27 +496,27 @@ class MobilenetV1Test(tf.test.TestCase):
height, width = 150, 150 height, width = 150, 150
num_classes = 1000 num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3)) train_inputs = tf.random.uniform((train_batch_size, height, width, 3))
mobilenet_v1.mobilenet_v1(train_inputs, num_classes) mobilenet_v1.mobilenet_v1(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3)) eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes, logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
reuse=True) reuse=True)
predictions = tf.argmax(logits, 1) predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess: with self.test_session() as sess:
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions) output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,)) self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self): def testLogitsNotSqueezed(self):
num_classes = 25 num_classes = 25
images = tf.random_uniform([1, 224, 224, 3]) images = tf.random.uniform([1, 224, 224, 3])
logits, _ = mobilenet_v1.mobilenet_v1(images, logits, _ = mobilenet_v1.mobilenet_v1(images,
num_classes=num_classes, num_classes=num_classes,
spatial_squeeze=False) spatial_squeeze=False)
with self.test_session() as sess: with self.test_session() as sess:
tf.global_variables_initializer().run() tf.compat.v1.global_variables_initializer().run()
logits_out = sess.run(logits) logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
......
...@@ -28,7 +28,7 @@ from preprocessing import preprocessing_factory ...@@ -28,7 +28,7 @@ from preprocessing import preprocessing_factory
slim = contrib_slim slim = contrib_slim
flags = tf.app.flags flags = tf.compat.v1.app.flags
flags.DEFINE_string('master', '', 'Session master') flags.DEFINE_string('master', '', 'Session master')
flags.DEFINE_integer('task', 0, 'Task') flags.DEFINE_integer('task', 0, 'Task')
...@@ -104,11 +104,10 @@ def imagenet_input(is_training): ...@@ -104,11 +104,10 @@ def imagenet_input(is_training):
image = image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size) image = image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size)
images, labels = tf.train.batch( images, labels = tf.compat.v1.train.batch([image, label],
[image, label], batch_size=FLAGS.batch_size,
batch_size=FLAGS.batch_size, num_threads=4,
num_threads=4, capacity=5 * FLAGS.batch_size)
capacity=5 * FLAGS.batch_size)
labels = slim.one_hot_encoding(labels, FLAGS.num_classes) labels = slim.one_hot_encoding(labels, FLAGS.num_classes)
return images, labels return images, labels
...@@ -123,7 +122,7 @@ def build_model(): ...@@ -123,7 +122,7 @@ def build_model():
""" """
g = tf.Graph() g = tf.Graph()
with g.as_default(), tf.device( with g.as_default(), tf.device(
tf.train.replica_device_setter(FLAGS.ps_tasks)): tf.compat.v1.train.replica_device_setter(FLAGS.ps_tasks)):
inputs, labels = imagenet_input(is_training=True) inputs, labels = imagenet_input(is_training=True)
with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)): with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)):
logits, _ = mobilenet_v1.mobilenet_v1( logits, _ = mobilenet_v1.mobilenet_v1(
...@@ -132,7 +131,7 @@ def build_model(): ...@@ -132,7 +131,7 @@ def build_model():
depth_multiplier=FLAGS.depth_multiplier, depth_multiplier=FLAGS.depth_multiplier,
num_classes=FLAGS.num_classes) num_classes=FLAGS.num_classes)
tf.losses.softmax_cross_entropy(labels, logits) tf.compat.v1.losses.softmax_cross_entropy(labels, logits)
# Call rewriter to produce graph with fake quant ops and folded batch norms # Call rewriter to produce graph with fake quant ops and folded batch norms
# quant_delay delays start of quantization till quant_delay steps, allowing # quant_delay delays start of quantization till quant_delay steps, allowing
...@@ -140,19 +139,19 @@ def build_model(): ...@@ -140,19 +139,19 @@ def build_model():
if FLAGS.quantize: if FLAGS.quantize:
contrib_quantize.create_training_graph(quant_delay=get_quant_delay()) contrib_quantize.create_training_graph(quant_delay=get_quant_delay())
total_loss = tf.losses.get_total_loss(name='total_loss') total_loss = tf.compat.v1.losses.get_total_loss(name='total_loss')
# Configure the learning rate using an exponential decay. # Configure the learning rate using an exponential decay.
num_epochs_per_decay = 2.5 num_epochs_per_decay = 2.5
imagenet_size = 1271167 imagenet_size = 1271167
decay_steps = int(imagenet_size / FLAGS.batch_size * num_epochs_per_decay) decay_steps = int(imagenet_size / FLAGS.batch_size * num_epochs_per_decay)
learning_rate = tf.train.exponential_decay( learning_rate = tf.compat.v1.train.exponential_decay(
get_learning_rate(), get_learning_rate(),
tf.train.get_or_create_global_step(), tf.compat.v1.train.get_or_create_global_step(),
decay_steps, decay_steps,
_LEARNING_RATE_DECAY_FACTOR, _LEARNING_RATE_DECAY_FACTOR,
staircase=True) staircase=True)
opt = tf.train.GradientDescentOptimizer(learning_rate) opt = tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
train_tensor = slim.learning.create_train_op( train_tensor = slim.learning.create_train_op(
total_loss, total_loss,
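A worked example of the decay schedule configured above; the batch size is assumed purely for illustration (250 is the eval script's default shown earlier, and the training default may differ):

imagenet_size = 1271167
num_epochs_per_decay = 2.5
batch_size = 250
decay_steps = int(imagenet_size / batch_size * num_epochs_per_decay)
print(decay_steps)  # 12711, i.e. the learning rate decays roughly every 2.5 epochs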
...@@ -167,7 +166,8 @@ def get_checkpoint_init_fn(): ...@@ -167,7 +166,8 @@ def get_checkpoint_init_fn():
"""Returns the checkpoint init_fn if the checkpoint is provided.""" """Returns the checkpoint init_fn if the checkpoint is provided."""
if FLAGS.fine_tune_checkpoint: if FLAGS.fine_tune_checkpoint:
variables_to_restore = slim.get_variables_to_restore() variables_to_restore = slim.get_variables_to_restore()
global_step_reset = tf.assign(tf.train.get_or_create_global_step(), 0) global_step_reset = tf.compat.v1.assign(
tf.compat.v1.train.get_or_create_global_step(), 0)
# When restoring from a floating point model, the min/max values for # When restoring from a floating point model, the min/max values for
# quantized weights and activations are not present. # quantized weights and activations are not present.
# We instruct slim to ignore variables that are missing during restoration # We instruct slim to ignore variables that are missing during restoration
...@@ -203,7 +203,7 @@ def train_model(): ...@@ -203,7 +203,7 @@ def train_model():
save_summaries_secs=FLAGS.save_summaries_secs, save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs, save_interval_secs=FLAGS.save_interval_secs,
init_fn=get_checkpoint_init_fn(), init_fn=get_checkpoint_init_fn(),
global_step=tf.train.get_global_step()) global_step=tf.compat.v1.train.get_global_step())
def main(unused_arg): def main(unused_arg):
...@@ -211,4 +211,4 @@ def main(unused_arg): ...@@ -211,4 +211,4 @@ def main(unused_arg):
if __name__ == '__main__': if __name__ == '__main__':
tf.app.run(main) tf.compat.v1.app.run(main)
...@@ -231,9 +231,9 @@ def nasnet_large_arg_scope(weight_decay=5e-5, ...@@ -231,9 +231,9 @@ def nasnet_large_arg_scope(weight_decay=5e-5,
def _build_aux_head(net, end_points, num_classes, hparams, scope): def _build_aux_head(net, end_points, num_classes, hparams, scope):
"""Auxiliary head used for all models across all datasets.""" """Auxiliary head used for all models across all datasets."""
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
with tf.variable_scope(scope): with tf.compat.v1.variable_scope(scope):
aux_logits = tf.identity(net) aux_logits = tf.identity(net)
with tf.variable_scope('aux_logits'): with tf.compat.v1.variable_scope('aux_logits'):
aux_logits = slim.avg_pool2d( aux_logits = slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID') aux_logits, [5, 5], stride=3, padding='VALID')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj') aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
...@@ -302,11 +302,12 @@ def build_nasnet_cifar(images, num_classes, ...@@ -302,11 +302,12 @@ def build_nasnet_cifar(images, num_classes,
_update_hparams(hparams, is_training) _update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC': if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW ' tf.compat.v1.logging.info(
'data format for increased speed on GPU.') 'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW': if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2]) images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network # Calculate the total number of cells in the network
# Add 2 for the reduction cells # Add 2 for the reduction cells
...@@ -354,11 +355,12 @@ def build_nasnet_mobile(images, num_classes, ...@@ -354,11 +355,12 @@ def build_nasnet_mobile(images, num_classes,
_update_hparams(hparams, is_training) _update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC': if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW ' tf.compat.v1.logging.info(
'data format for increased speed on GPU.') 'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW': if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2]) images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network # Calculate the total number of cells in the network
# Add 2 for the reduction cells # Add 2 for the reduction cells
...@@ -409,11 +411,12 @@ def build_nasnet_large(images, num_classes, ...@@ -409,11 +411,12 @@ def build_nasnet_large(images, num_classes,
_update_hparams(hparams, is_training) _update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC': if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW ' tf.compat.v1.logging.info(
'data format for increased speed on GPU.') 'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW': if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2]) images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network # Calculate the total number of cells in the network
# Add 2 for the reduction cells # Add 2 for the reduction cells
...@@ -534,7 +537,7 @@ def _build_nasnet_base(images, ...@@ -534,7 +537,7 @@ def _build_nasnet_base(images,
cell_outputs.append(net) cell_outputs.append(net)
# Final softmax layer # Final softmax layer
with tf.variable_scope('final_layer'): with tf.compat.v1.variable_scope('final_layer'):
net = activation_fn(net) net = activation_fn(net)
net = nasnet_utils.global_avg_pool(net) net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or not num_classes: if add_and_check_endpoint('global_pool', net) or not num_classes:
......
...@@ -31,8 +31,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -31,8 +31,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 32, 32 height, width = 32, 32
num_classes = 10 num_classes = 10
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
logits, end_points = nasnet.build_nasnet_cifar(inputs, num_classes) logits, end_points = nasnet.build_nasnet_cifar(inputs, num_classes)
auxlogits = end_points['AuxLogits'] auxlogits = end_points['AuxLogits']
...@@ -48,8 +48,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -48,8 +48,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
logits, end_points = nasnet.build_nasnet_mobile(inputs, num_classes) logits, end_points = nasnet.build_nasnet_mobile(inputs, num_classes)
auxlogits = end_points['AuxLogits'] auxlogits = end_points['AuxLogits']
...@@ -65,8 +65,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -65,8 +65,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 331, 331 height, width = 331, 331
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()): with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
logits, end_points = nasnet.build_nasnet_large(inputs, num_classes) logits, end_points = nasnet.build_nasnet_large(inputs, num_classes)
auxlogits = end_points['AuxLogits'] auxlogits = end_points['AuxLogits']
...@@ -82,8 +82,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -82,8 +82,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 32, 32 height, width = 32, 32
num_classes = None num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
net, end_points = nasnet.build_nasnet_cifar(inputs, num_classes) net, end_points = nasnet.build_nasnet_cifar(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points) self.assertFalse('AuxLogits' in end_points)
...@@ -95,8 +95,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -95,8 +95,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
num_classes = None num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
net, end_points = nasnet.build_nasnet_mobile(inputs, num_classes) net, end_points = nasnet.build_nasnet_mobile(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points) self.assertFalse('AuxLogits' in end_points)
...@@ -108,8 +108,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -108,8 +108,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 331, 331 height, width = 331, 331
num_classes = None num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()): with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
net, end_points = nasnet.build_nasnet_large(inputs, num_classes) net, end_points = nasnet.build_nasnet_large(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points) self.assertFalse('AuxLogits' in end_points)
...@@ -121,8 +121,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -121,8 +121,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 32, 32 height, width = 32, 32
num_classes = 10 num_classes = 10
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
_, end_points = nasnet.build_nasnet_cifar(inputs, num_classes) _, end_points = nasnet.build_nasnet_cifar(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 32, 32, 96], endpoints_shapes = {'Stem': [batch_size, 32, 32, 96],
...@@ -153,7 +153,7 @@ class NASNetTest(tf.test.TestCase): ...@@ -153,7 +153,7 @@ class NASNetTest(tf.test.TestCase):
'Predictions': [batch_size, num_classes]} 'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes: for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name)) tf.compat.v1.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name] expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points) self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
...@@ -164,9 +164,9 @@ class NASNetTest(tf.test.TestCase): ...@@ -164,9 +164,9 @@ class NASNetTest(tf.test.TestCase):
height, width = 32, 32 height, width = 32, 32
num_classes = 10 num_classes = 10
for use_aux_head in (True, False): for use_aux_head in (True, False):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
config = nasnet.cifar_config() config = nasnet.cifar_config()
config.set_hparam('use_aux_head', int(use_aux_head)) config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
...@@ -178,8 +178,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -178,8 +178,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
_, end_points = nasnet.build_nasnet_mobile(inputs, num_classes) _, end_points = nasnet.build_nasnet_mobile(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 28, 28, 88], endpoints_shapes = {'Stem': [batch_size, 28, 28, 88],
...@@ -204,7 +204,7 @@ class NASNetTest(tf.test.TestCase): ...@@ -204,7 +204,7 @@ class NASNetTest(tf.test.TestCase):
'Predictions': [batch_size, num_classes]} 'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes: for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name)) tf.compat.v1.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name] expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points) self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
...@@ -215,9 +215,9 @@ class NASNetTest(tf.test.TestCase): ...@@ -215,9 +215,9 @@ class NASNetTest(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
for use_aux_head in (True, False): for use_aux_head in (True, False):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
config = nasnet.mobile_imagenet_config() config = nasnet.mobile_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head)) config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
...@@ -229,8 +229,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -229,8 +229,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 331, 331 height, width = 331, 331
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()): with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
_, end_points = nasnet.build_nasnet_large(inputs, num_classes) _, end_points = nasnet.build_nasnet_large(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 42, 42, 336], endpoints_shapes = {'Stem': [batch_size, 42, 42, 336],
...@@ -261,7 +261,7 @@ class NASNetTest(tf.test.TestCase): ...@@ -261,7 +261,7 @@ class NASNetTest(tf.test.TestCase):
'Predictions': [batch_size, num_classes]} 'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes: for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name)) tf.compat.v1.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name] expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points) self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
...@@ -272,9 +272,9 @@ class NASNetTest(tf.test.TestCase): ...@@ -272,9 +272,9 @@ class NASNetTest(tf.test.TestCase):
height, width = 331, 331 height, width = 331, 331
num_classes = 1000 num_classes = 1000
for use_aux_head in (True, False): for use_aux_head in (True, False):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
config = nasnet.large_imagenet_config() config = nasnet.large_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head)) config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_large_arg_scope()): with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
...@@ -286,18 +286,20 @@ class NASNetTest(tf.test.TestCase): ...@@ -286,18 +286,20 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
# Force all Variables to reside on the device. # Force all Variables to reside on the device.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'): with tf.compat.v1.variable_scope('on_cpu'), tf.device('/cpu:0'):
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
nasnet.build_nasnet_mobile(inputs, num_classes) nasnet.build_nasnet_mobile(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'): with tf.compat.v1.variable_scope('on_gpu'), tf.device('/gpu:0'):
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
nasnet.build_nasnet_mobile(inputs, num_classes) nasnet.build_nasnet_mobile(inputs, num_classes)
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'): for v in tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0') self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'): for v in tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0') self.assertDeviceEqual(v.device, '/gpu:0')
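The collection lookup above is the `tf.compat.v1` spelling of the old v1 API. As a standalone illustration (a minimal sketch in graph mode, with a hypothetical variable name, not the test's code), pinning a variable inside a named scope and listing it back by scope looks like this:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Create a variable on the CPU inside a named scope, then retrieve it by scope.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
  w = tf.get_variable('w', shape=[3], initializer=tf.zeros_initializer())

cpu_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu')
print([(v.op.name, v.device) for v in cpu_vars])  # e.g. [('on_cpu/w', '/device:CPU:0')]
```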
def testUnknownBatchSizeMobileModel(self): def testUnknownBatchSizeMobileModel(self):
...@@ -305,13 +307,13 @@ class NASNetTest(tf.test.TestCase): ...@@ -305,13 +307,13 @@ class NASNetTest(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
with self.test_session() as sess: with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3)) inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
logits, _ = nasnet.build_nasnet_mobile(inputs, num_classes) logits, _ = nasnet.build_nasnet_mobile(inputs, num_classes)
self.assertListEqual(logits.get_shape().as_list(), self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes]) [None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3)) images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()}) output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes)) self.assertEquals(output.shape, (batch_size, num_classes))
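A placeholder with a `None` batch dimension, as used in the test above, keeps the static shape partially unknown while receiving a concrete batch size at `sess.run` time. A minimal standalone sketch (graph mode, hypothetical small shapes, not the model under test):

```python
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

inputs = tf.placeholder(tf.float32, (None, 4))   # batch size unknown statically
logits = tf.layers.dense(inputs, 3)
print(logits.get_shape().as_list())              # [None, 3]

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  out = sess.run(logits, {inputs: np.zeros((5, 4), np.float32)})
  print(out.shape)                               # (5, 3) -- concrete at run time
```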
...@@ -320,13 +322,13 @@ class NASNetTest(tf.test.TestCase): ...@@ -320,13 +322,13 @@ class NASNetTest(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
with self.test_session() as sess: with self.test_session() as sess:
eval_inputs = tf.random_uniform((batch_size, height, width, 3)) eval_inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
logits, _ = nasnet.build_nasnet_mobile(eval_inputs, logits, _ = nasnet.build_nasnet_mobile(eval_inputs,
num_classes, num_classes,
is_training=False) is_training=False)
predictions = tf.argmax(logits, 1) predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.global_variables_initializer()) sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(predictions) output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,)) self.assertEquals(output.shape, (batch_size,))
...@@ -334,8 +336,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -334,8 +336,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 32, 32 height, width = 32, 32
num_classes = 10 num_classes = 10
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
config = nasnet.cifar_config() config = nasnet.cifar_config()
config.set_hparam('data_format', 'NCHW') config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
...@@ -348,8 +350,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -348,8 +350,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
config = nasnet.mobile_imagenet_config() config = nasnet.mobile_imagenet_config()
config.set_hparam('data_format', 'NCHW') config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()): with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
...@@ -362,8 +364,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -362,8 +364,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 331, 331 height, width = 331, 331
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
config = nasnet.large_imagenet_config() config = nasnet.large_imagenet_config()
config.set_hparam('data_format', 'NCHW') config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_large_arg_scope()): with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
...@@ -376,8 +378,8 @@ class NASNetTest(tf.test.TestCase): ...@@ -376,8 +378,8 @@ class NASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 32, 32 height, width = 32, 32
num_classes = 10 num_classes = 10
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
global_step = tf.train.create_global_step() global_step = tf.compat.v1.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
logits, end_points = nasnet.build_nasnet_cifar(inputs, logits, end_points = nasnet.build_nasnet_cifar(inputs,
num_classes, num_classes,
...@@ -396,14 +398,14 @@ class NASNetTest(tf.test.TestCase): ...@@ -396,14 +398,14 @@ class NASNetTest(tf.test.TestCase):
height, width = 32, 32 height, width = 32, 32
num_classes = 10 num_classes = 10
for use_bounded_activation in (True, False): for use_bounded_activation in (True, False):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
config = nasnet.cifar_config() config = nasnet.cifar_config()
config.set_hparam('use_bounded_activation', use_bounded_activation) config.set_hparam('use_bounded_activation', use_bounded_activation)
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()): with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
_, _ = nasnet.build_nasnet_cifar( _, _ = nasnet.build_nasnet_cifar(
inputs, num_classes, config=config) inputs, num_classes, config=config)
for node in tf.get_default_graph().as_graph_def().node: for node in tf.compat.v1.get_default_graph().as_graph_def().node:
if node.op.startswith('Relu'): if node.op.startswith('Relu'):
self.assertEqual(node.op == 'Relu6', use_bounded_activation) self.assertEqual(node.op == 'Relu6', use_bounded_activation)
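The bounded-activation check above walks the serialized graph definition and inspects op types. A standalone sketch of the same pattern on a toy graph (graph mode; the single `relu6` op stands in for a network built with bounded activations):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.random.uniform((1, 4))
_ = tf.nn.relu6(x)  # hypothetical stand-in for a bounded-activation network

for node in tf.get_default_graph().as_graph_def().node:
  if node.op.startswith('Relu'):
    print(node.name, node.op)  # prints a 'Relu6' node
```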
......
...@@ -82,9 +82,9 @@ def global_avg_pool(x, data_format=INVALID): ...@@ -82,9 +82,9 @@ def global_avg_pool(x, data_format=INVALID):
assert data_format in ['NHWC', 'NCHW'] assert data_format in ['NHWC', 'NCHW']
assert x.shape.ndims == 4 assert x.shape.ndims == 4
if data_format == 'NHWC': if data_format == 'NHWC':
return tf.reduce_mean(x, [1, 2]) return tf.reduce_mean(input_tensor=x, axis=[1, 2])
else: else:
return tf.reduce_mean(x, [2, 3]) return tf.reduce_mean(input_tensor=x, axis=[2, 3])
@contrib_framework.add_arg_scope @contrib_framework.add_arg_scope
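The keyword-argument `tf.reduce_mean` form above is behavior-preserving; only the spelling changes. A minimal standalone sketch (assuming the `tf.compat.v1` shim and hypothetical tensor shapes, not the library function itself) of the data-format-aware pooling:

```python
import tensorflow.compat.v1 as tf

def global_avg_pool_sketch(x, data_format='NHWC'):
  """Averages over the spatial axes; which axes depends on data_format."""
  assert data_format in ('NHWC', 'NCHW')
  axis = [1, 2] if data_format == 'NHWC' else [2, 3]
  # input_tensor=/axis= is the TF2-compatible spelling of the old positional call.
  return tf.reduce_mean(input_tensor=x, axis=axis)

# Example: [batch, height, width, channels] -> [batch, channels].
x = tf.random.uniform((5, 10, 20, 10))
print(global_avg_pool_sketch(x, 'NHWC').shape)  # (5, 10)
```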
...@@ -101,8 +101,12 @@ def factorized_reduction(net, output_filters, stride, data_format=INVALID): ...@@ -101,8 +101,12 @@ def factorized_reduction(net, output_filters, stride, data_format=INVALID):
stride_spec = [1, 1, stride, stride] stride_spec = [1, 1, stride, stride]
# Skip path 1 # Skip path 1
path1 = tf.nn.avg_pool( path1 = tf.compat.v2.nn.avg_pool2d(
net, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format) input=net,
ksize=[1, 1, 1, 1],
strides=stride_spec,
padding='VALID',
data_format=data_format)
path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv') path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2 # Skip path 2
...@@ -110,15 +114,19 @@ def factorized_reduction(net, output_filters, stride, data_format=INVALID): ...@@ -110,15 +114,19 @@ def factorized_reduction(net, output_filters, stride, data_format=INVALID):
# include those 0's that were added. # include those 0's that were added.
if data_format == 'NHWC': if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]] pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(net, pad_arr)[:, 1:, 1:, :] path2 = tf.pad(tensor=net, paddings=pad_arr)[:, 1:, 1:, :]
concat_axis = 3 concat_axis = 3
else: else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]] pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(net, pad_arr)[:, :, 1:, 1:] path2 = tf.pad(tensor=net, paddings=pad_arr)[:, :, 1:, 1:]
concat_axis = 1 concat_axis = 1
path2 = tf.nn.avg_pool( path2 = tf.compat.v2.nn.avg_pool2d(
path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format) input=path2,
ksize=[1, 1, 1, 1],
strides=stride_spec,
padding='VALID',
data_format=data_format)
# If odd number of filters, add an additional one to the second path. # If odd number of filters, add an additional one to the second path.
final_filter_size = int(output_filters / 2) + int(output_filters % 2) final_filter_size = int(output_filters / 2) + int(output_filters % 2)
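The pad-then-slice above shifts the feature map by one pixel before the second pooling path, so the two stride-2 paths sample complementary grid positions. A standalone sketch of that trick (NHWC, stride 2, hypothetical shapes; not the full factorized_reduction function):

```python
import tensorflow as tf

# Skip path 2: pad bottom/right by one, then drop the top row and left column,
# so the stride-2 average pool samples the odd-offset positions that the
# unshifted path 1 misses.
net = tf.random.uniform((1, 8, 8, 4))
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
shifted = tf.pad(tensor=net, paddings=pad_arr)[:, 1:, 1:, :]
path2 = tf.compat.v2.nn.avg_pool2d(
    input=shifted,
    ksize=[1, 1, 1, 1],
    strides=[1, 2, 2, 1],
    padding='VALID',
    data_format='NHWC')
print(path2.shape)  # (1, 4, 4, 4)
```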
...@@ -134,10 +142,10 @@ def factorized_reduction(net, output_filters, stride, data_format=INVALID): ...@@ -134,10 +142,10 @@ def factorized_reduction(net, output_filters, stride, data_format=INVALID):
def drop_path(net, keep_prob, is_training=True): def drop_path(net, keep_prob, is_training=True):
"""Drops out a whole example hiddenstate with the specified probability.""" """Drops out a whole example hiddenstate with the specified probability."""
if is_training: if is_training:
batch_size = tf.shape(net)[0] batch_size = tf.shape(input=net)[0]
noise_shape = [batch_size, 1, 1, 1] noise_shape = [batch_size, 1, 1, 1]
random_tensor = keep_prob random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32) random_tensor += tf.random.uniform(noise_shape, dtype=tf.float32)
binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype) binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
keep_prob_inv = tf.cast(1.0 / keep_prob, net.dtype) keep_prob_inv = tf.cast(1.0 / keep_prob, net.dtype)
net = net * keep_prob_inv * binary_tensor net = net * keep_prob_inv * binary_tensor
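As a quick illustration of the masking arithmetic above (a standalone numpy sketch, not the project's code): `floor(keep_prob + U[0,1))` is 1 with probability `keep_prob` and 0 otherwise, and rescaling survivors by `1/keep_prob` keeps the expected activation unchanged.

```python
import numpy as np

keep_prob = 0.7
u = np.random.uniform(size=(100000, 1, 1, 1))
mask = np.floor(keep_prob + u)   # per-example Bernoulli(keep_prob) mask
print(mask.mean())               # close to 0.7
# E[x * mask / keep_prob] == E[x], so activations keep their scale in expectation.
```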
...@@ -316,10 +324,10 @@ class NasNetABaseCell(object): ...@@ -316,10 +324,10 @@ class NasNetABaseCell(object):
self._filter_size = int(self._num_conv_filters * filter_scaling) self._filter_size = int(self._num_conv_filters * filter_scaling)
i = 0 i = 0
with tf.variable_scope(scope): with tf.compat.v1.variable_scope(scope):
net = self._cell_base(net, prev_layer) net = self._cell_base(net, prev_layer)
for iteration in range(5): for iteration in range(5):
with tf.variable_scope('comb_iter_{}'.format(iteration)): with tf.compat.v1.variable_scope('comb_iter_{}'.format(iteration)):
left_hiddenstate_idx, right_hiddenstate_idx = ( left_hiddenstate_idx, right_hiddenstate_idx = (
self._hiddenstate_indices[i], self._hiddenstate_indices[i],
self._hiddenstate_indices[i + 1]) self._hiddenstate_indices[i + 1])
...@@ -332,17 +340,17 @@ class NasNetABaseCell(object): ...@@ -332,17 +340,17 @@ class NasNetABaseCell(object):
operation_right = self._operations[i+1] operation_right = self._operations[i+1]
i += 2 i += 2
# Apply conv operations # Apply conv operations
with tf.variable_scope('left'): with tf.compat.v1.variable_scope('left'):
h1 = self._apply_conv_operation(h1, operation_left, h1 = self._apply_conv_operation(h1, operation_left,
stride, original_input_left, stride, original_input_left,
current_step) current_step)
with tf.variable_scope('right'): with tf.compat.v1.variable_scope('right'):
h2 = self._apply_conv_operation(h2, operation_right, h2 = self._apply_conv_operation(h2, operation_right,
stride, original_input_right, stride, original_input_right,
current_step) current_step)
# Combine hidden states using 'add'. # Combine hidden states using 'add'.
with tf.variable_scope('combine'): with tf.compat.v1.variable_scope('combine'):
h = h1 + h2 h = h1 + h2
if self._use_bounded_activation: if self._use_bounded_activation:
h = tf.nn.relu6(h) h = tf.nn.relu6(h)
...@@ -350,7 +358,7 @@ class NasNetABaseCell(object): ...@@ -350,7 +358,7 @@ class NasNetABaseCell(object):
# Add hiddenstate to the list of hiddenstates we can choose from # Add hiddenstate to the list of hiddenstates we can choose from
net.append(h) net.append(h)
with tf.variable_scope('cell_output'): with tf.compat.v1.variable_scope('cell_output'):
net = self._combine_unused_states(net) net = self._combine_unused_states(net)
return net return net
...@@ -411,7 +419,7 @@ class NasNetABaseCell(object): ...@@ -411,7 +419,7 @@ class NasNetABaseCell(object):
should_reduce = should_reduce and not used_h should_reduce = should_reduce and not used_h
if should_reduce: if should_reduce:
stride = 2 if final_height != curr_height else 1 stride = 2 if final_height != curr_height else 1
with tf.variable_scope('reduction_{}'.format(idx)): with tf.compat.v1.variable_scope('reduction_{}'.format(idx)):
net[idx] = factorized_reduction( net[idx] = factorized_reduction(
net[idx], final_num_filters, stride) net[idx], final_num_filters, stride)
...@@ -452,23 +460,24 @@ class NasNetABaseCell(object): ...@@ -452,23 +460,24 @@ class NasNetABaseCell(object):
layer_ratio = (self._cell_num + 1)/float(num_cells) layer_ratio = (self._cell_num + 1)/float(num_cells)
if use_summaries: if use_summaries:
with tf.device('/cpu:0'): with tf.device('/cpu:0'):
tf.summary.scalar('layer_ratio', layer_ratio) tf.compat.v1.summary.scalar('layer_ratio', layer_ratio)
drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob) drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
if drop_connect_version in ['v1', 'v3']: if drop_connect_version in ['v1', 'v3']:
# Decrease the keep probability over time # Decrease the keep probability over time
if current_step is None: if current_step is None:
current_step = tf.train.get_or_create_global_step() current_step = tf.compat.v1.train.get_or_create_global_step()
current_step = tf.cast(current_step, tf.float32) current_step = tf.cast(current_step, tf.float32)
drop_path_burn_in_steps = self._total_training_steps drop_path_burn_in_steps = self._total_training_steps
current_ratio = current_step / drop_path_burn_in_steps current_ratio = current_step / drop_path_burn_in_steps
current_ratio = tf.minimum(1.0, current_ratio) current_ratio = tf.minimum(1.0, current_ratio)
if use_summaries: if use_summaries:
with tf.device('/cpu:0'): with tf.device('/cpu:0'):
tf.summary.scalar('current_ratio', current_ratio) tf.compat.v1.summary.scalar('current_ratio', current_ratio)
drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob)) drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
if use_summaries: if use_summaries:
with tf.device('/cpu:0'): with tf.device('/cpu:0'):
tf.summary.scalar('drop_path_keep_prob', drop_path_keep_prob) tf.compat.v1.summary.scalar('drop_path_keep_prob',
drop_path_keep_prob)
net = drop_path(net, drop_path_keep_prob) net = drop_path(net, drop_path_keep_prob)
return net return net
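The two scalings above compose: the keep probability is lowered for deeper cells and then annealed from 1.0 toward that target over training (for the v1/v3 drop-connect branch). A plain-Python sketch of the composed schedule, with hypothetical hyperparameter values:

```python
def scheduled_drop_path_keep_prob(base_keep_prob, cell_num, num_cells,
                                  current_step, total_training_steps):
  # Deeper cells get a lower keep probability...
  layer_ratio = (cell_num + 1) / float(num_cells)
  keep_prob = 1 - layer_ratio * (1 - base_keep_prob)
  # ...and that target is phased in linearly over the training run.
  current_ratio = min(1.0, current_step / float(total_training_steps))
  return 1 - current_ratio * (1 - keep_prob)

# Early in training the effective keep probability is still close to 1.0.
print(scheduled_drop_path_keep_prob(0.6, cell_num=11, num_cells=12,
                                    current_step=1000,
                                    total_training_steps=250000))  # ~0.998
```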
......
...@@ -51,7 +51,7 @@ class NasnetUtilsTest(tf.test.TestCase): ...@@ -51,7 +51,7 @@ class NasnetUtilsTest(tf.test.TestCase):
def testGlobalAvgPool(self): def testGlobalAvgPool(self):
data_formats = ['NHWC', 'NCHW'] data_formats = ['NHWC', 'NCHW']
inputs = tf.placeholder(tf.float32, (5, 10, 20, 10)) inputs = tf.compat.v1.placeholder(tf.float32, (5, 10, 20, 10))
for data_format in data_formats: for data_format in data_formats:
output = nasnet_utils.global_avg_pool( output = nasnet_utils.global_avg_pool(
inputs, data_format) inputs, data_format)
......
...@@ -147,7 +147,7 @@ def _build_pnasnet_base(images, ...@@ -147,7 +147,7 @@ def _build_pnasnet_base(images,
# pylint: enable=protected-access # pylint: enable=protected-access
# Final softmax layer # Final softmax layer
with tf.variable_scope('final_layer'): with tf.compat.v1.variable_scope('final_layer'):
net = activation_fn(net) net = activation_fn(net)
net = nasnet_utils.global_avg_pool(net) net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or not num_classes: if add_and_check_endpoint('global_pool', net) or not num_classes:
...@@ -176,11 +176,12 @@ def build_pnasnet_large(images, ...@@ -176,11 +176,12 @@ def build_pnasnet_large(images,
# pylint: enable=protected-access # pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC': if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW ' tf.compat.v1.logging.info(
'data format for increased speed on GPU.') 'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW': if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2]) images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network. # Calculate the total number of cells in the network.
# There is no distinction between reduction and normal cells in PNAS so the # There is no distinction between reduction and normal cells in PNAS so the
...@@ -224,11 +225,12 @@ def build_pnasnet_mobile(images, ...@@ -224,11 +225,12 @@ def build_pnasnet_mobile(images,
# pylint: enable=protected-access # pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC': if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW ' tf.compat.v1.logging.info(
'data format for increased speed on GPU.') 'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW': if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2]) images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network. # Calculate the total number of cells in the network.
# There is no distinction between reduction and normal cells in PNAS so the # There is no distinction between reduction and normal cells in PNAS so the
......
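The NCHW branch above only reorders axes before the network is built; a minimal sketch of that transpose (hypothetical input shape):

```python
import tensorflow as tf

# NHWC -> NCHW: move the channel axis ahead of the spatial axes.
images_nhwc = tf.random.uniform((5, 224, 224, 3))
images_nchw = tf.transpose(a=images_nhwc, perm=[0, 3, 1, 2])
print(images_nchw.shape)  # (5, 3, 224, 224)
```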
...@@ -31,8 +31,8 @@ class PNASNetTest(tf.test.TestCase): ...@@ -31,8 +31,8 @@ class PNASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 331, 331 height, width = 331, 331
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes) logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
auxlogits = end_points['AuxLogits'] auxlogits = end_points['AuxLogits']
...@@ -48,8 +48,8 @@ class PNASNetTest(tf.test.TestCase): ...@@ -48,8 +48,8 @@ class PNASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes) logits, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
auxlogits = end_points['AuxLogits'] auxlogits = end_points['AuxLogits']
...@@ -63,21 +63,21 @@ class PNASNetTest(tf.test.TestCase): ...@@ -63,21 +63,21 @@ class PNASNetTest(tf.test.TestCase):
def testBuildNonExistingLayerLargeModel(self): def testBuildNonExistingLayerLargeModel(self):
"""Tests that the model is built correctly without unnecessary layers.""" """Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random_uniform((5, 331, 331, 3)) inputs = tf.random.uniform((5, 331, 331, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
pnasnet.build_pnasnet_large(inputs, 1000) pnasnet.build_pnasnet_large(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()] vars_names = [x.op.name for x in tf.compat.v1.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names) self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names) self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildNonExistingLayerMobileModel(self): def testBuildNonExistingLayerMobileModel(self):
"""Tests that the model is built correctly without unnecessary layers.""" """Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random_uniform((5, 224, 224, 3)) inputs = tf.random.uniform((5, 224, 224, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
pnasnet.build_pnasnet_mobile(inputs, 1000) pnasnet.build_pnasnet_mobile(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()] vars_names = [x.op.name for x in tf.compat.v1.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names) self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names) self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
...@@ -85,8 +85,8 @@ class PNASNetTest(tf.test.TestCase): ...@@ -85,8 +85,8 @@ class PNASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 331, 331 height, width = 331, 331
num_classes = None num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes) net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points) self.assertFalse('AuxLogits' in end_points)
...@@ -98,8 +98,8 @@ class PNASNetTest(tf.test.TestCase): ...@@ -98,8 +98,8 @@ class PNASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
num_classes = None num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
net, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes) net, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points) self.assertFalse('AuxLogits' in end_points)
...@@ -111,8 +111,8 @@ class PNASNetTest(tf.test.TestCase): ...@@ -111,8 +111,8 @@ class PNASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 331, 331 height, width = 331, 331
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes) _, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
...@@ -138,7 +138,7 @@ class PNASNetTest(tf.test.TestCase): ...@@ -138,7 +138,7 @@ class PNASNetTest(tf.test.TestCase):
self.assertEqual(len(end_points), 17) self.assertEqual(len(end_points), 17)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes: for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name)) tf.compat.v1.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name] expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points) self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
...@@ -148,8 +148,8 @@ class PNASNetTest(tf.test.TestCase): ...@@ -148,8 +148,8 @@ class PNASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes) _, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
...@@ -173,7 +173,7 @@ class PNASNetTest(tf.test.TestCase): ...@@ -173,7 +173,7 @@ class PNASNetTest(tf.test.TestCase):
self.assertEqual(len(end_points), 14) self.assertEqual(len(end_points), 14)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes: for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name)) tf.compat.v1.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name] expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points) self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
...@@ -184,9 +184,9 @@ class PNASNetTest(tf.test.TestCase): ...@@ -184,9 +184,9 @@ class PNASNetTest(tf.test.TestCase):
height, width = 331, 331 height, width = 331, 331
num_classes = 1000 num_classes = 1000
for use_aux_head in (True, False): for use_aux_head in (True, False):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
config = pnasnet.large_imagenet_config() config = pnasnet.large_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head)) config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
...@@ -199,9 +199,9 @@ class PNASNetTest(tf.test.TestCase): ...@@ -199,9 +199,9 @@ class PNASNetTest(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
for use_aux_head in (True, False): for use_aux_head in (True, False):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
config = pnasnet.mobile_imagenet_config() config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head)) config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
...@@ -213,8 +213,8 @@ class PNASNetTest(tf.test.TestCase): ...@@ -213,8 +213,8 @@ class PNASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 331, 331 height, width = 331, 331
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
config = pnasnet.large_imagenet_config() config = pnasnet.large_imagenet_config()
config.set_hparam('data_format', 'NCHW') config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
...@@ -227,8 +227,8 @@ class PNASNetTest(tf.test.TestCase): ...@@ -227,8 +227,8 @@ class PNASNetTest(tf.test.TestCase):
batch_size = 5 batch_size = 5
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step() tf.compat.v1.train.create_global_step()
config = pnasnet.mobile_imagenet_config() config = pnasnet.mobile_imagenet_config()
config.set_hparam('data_format', 'NCHW') config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
...@@ -242,14 +242,14 @@ class PNASNetTest(tf.test.TestCase): ...@@ -242,14 +242,14 @@ class PNASNetTest(tf.test.TestCase):
height, width = 224, 224 height, width = 224, 224
num_classes = 1000 num_classes = 1000
for use_bounded_activation in (True, False): for use_bounded_activation in (True, False):
tf.reset_default_graph() tf.compat.v1.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3)) inputs = tf.random.uniform((batch_size, height, width, 3))
config = pnasnet.mobile_imagenet_config() config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_bounded_activation', use_bounded_activation) config.set_hparam('use_bounded_activation', use_bounded_activation)
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()): with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, _ = pnasnet.build_pnasnet_mobile( _, _ = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config) inputs, num_classes, config=config)
for node in tf.get_default_graph().as_graph_def().node: for node in tf.compat.v1.get_default_graph().as_graph_def().node:
if node.op.startswith('Relu'): if node.op.startswith('Relu'):
self.assertEqual(node.op == 'Relu6', use_bounded_activation) self.assertEqual(node.op == 'Relu6', use_bounded_activation)
......