Commit d4eedbb9 authored by Mark Sandler, committed by Hongkun Yu

Merged commit includes the following changes: (#8077)



Internal cleanup (py2->py3) plus the following changes:

285513318  by Sergio Guadarrama:

    Adds a script for post-training quantization
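    (The quantization script itself sits in one of the collapsed diffs below, so it is not visible here. As a rough sketch only, a post-training quantization pass over a frozen slim graph with the TFLite converter might look like the following; every path and tensor name is a placeholder, not a value from the actual script.)

    import numpy as np
    import tensorflow as tf

    # All paths and array names below are hypothetical placeholders.
    converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
        graph_def_file='/tmp/mobilenet_v3_frozen.pb',
        input_arrays=['input'],
        output_arrays=['MobilenetV3/Predictions/Softmax'],
        input_shapes={'input': [1, 224, 224, 3]})

    def representative_dataset():
      # Real preprocessed images belong here; random data only keeps the
      # sketch self-contained.
      for _ in range(10):
        yield [np.random.rand(1, 224, 224, 3).astype(np.float32)]

    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    with open('/tmp/mobilenet_v3_quant.tflite', 'wb') as f:
      f.write(converter.convert())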

284222305  by Sergio Guadarrama:

    Modified squeeze-excite operation to accommodate tensors of undefined (Nonetype) H/W.
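    (A common way to make squeeze-and-excite work when H/W are None at graph-construction time is to pool with a reduce_mean over the spatial axes instead of a fixed-size average pool. The sketch below illustrates that idea only; it is not the mobilenet_v3 implementation from this commit.)

    import tensorflow.compat.v1 as tf

    tf.disable_v2_behavior()

    def squeeze_excite(input_tensor, se_ratio=0.25):
      """Toy SE block; only the channel count must be statically known."""
      channels = input_tensor.shape.as_list()[-1]
      # Global spatial pooling that tolerates dynamic H/W: shape [N, 1, 1, C].
      squeezed = tf.reduce_mean(input_tensor, axis=[1, 2], keepdims=True)
      reduced = tf.layers.dense(squeezed, int(channels * se_ratio),
                                activation=tf.nn.relu)
      gate = tf.layers.dense(reduced, channels, activation=tf.nn.sigmoid)
      return input_tensor * gate

    x = tf.placeholder(tf.float32, [None, None, None, 32])  # H and W undefined
    y = squeeze_excite(x)  # graph builds without concrete spatial dims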

282028343  by Sergio Guadarrama:

    Add MobilenetV3 and MobilenetEdgeTPU to the slim/nets_factory.
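    (With these factory entries in place, the new models can be requested like any other slim net. A usage sketch, assuming the research/slim directory is on PYTHONPATH and TF1 graph mode:)

    import tensorflow.compat.v1 as tf
    from nets import nets_factory

    tf.disable_v2_behavior()

    network_fn = nets_factory.get_network_fn(
        'mobilenet_v3_large', num_classes=1001, is_training=False)
    size = network_fn.default_image_size
    images = tf.placeholder(tf.float32, [1, size, size, 3])
    logits, end_points = network_fn(images)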

PiperOrigin-RevId: 289455329
Co-authored-by: Sergio Guadarrama <sguada@gmail.com>
parent 0e0a94a6
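Most of the per-file edits in the diff below apply the same mechanical TF2-compatibility pattern: symbols that only exist in TF1 move under tf.compat.v1, renamed ops are updated (tf.random_uniform -> tf.random.uniform), and deprecated argument names are replaced (keep_dims -> keepdims, positional arguments -> axis=..., input=...). A small before/after sketch of that pattern, using placeholder tensors rather than code from any one file:

import tensorflow as tf

tf.compat.v1.disable_v2_behavior()

# Old spellings (TF1-only), as the pre-change code reads:
#   inputs = tf.random_uniform((2, 224, 224, 3))
#   with tf.variable_scope('demo'):
#     pooled = tf.reduce_mean(inputs, [1, 2], keep_dims=True)
#     top = tf.argmax(pooled, 3)

# New spellings used throughout this commit (valid under TF1 and TF2):
inputs = tf.random.uniform((2, 224, 224, 3))
with tf.compat.v1.variable_scope('demo'):
  pooled = tf.reduce_mean(input_tensor=inputs, axis=[1, 2], keepdims=True)
  top = tf.argmax(input=pooled, axis=3)

with tf.compat.v1.Session() as sess:
  print(sess.run(pooled).shape)  # (2, 1, 1, 3)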
@@ -32,13 +32,15 @@ from nets import resnet_v2
 from nets import s3dg
 from nets import vgg
 from nets.mobilenet import mobilenet_v2
+from nets.mobilenet import mobilenet_v3
 from nets.nasnet import nasnet
 from nets.nasnet import pnasnet
 slim = contrib_slim
-networks_map = {'alexnet_v2': alexnet.alexnet_v2,
+networks_map = {
+    'alexnet_v2': alexnet.alexnet_v2,
     'cifarnet': cifarnet.cifarnet,
     'overfeat': overfeat.overfeat,
     'vgg_a': vgg.vgg_a,
@@ -67,14 +69,21 @@ networks_map = {'alexnet_v2': alexnet.alexnet_v2,
     'mobilenet_v2': mobilenet_v2.mobilenet,
     'mobilenet_v2_140': mobilenet_v2.mobilenet_v2_140,
     'mobilenet_v2_035': mobilenet_v2.mobilenet_v2_035,
+    'mobilenet_v3_small': mobilenet_v3.small,
+    'mobilenet_v3_large': mobilenet_v3.large,
+    'mobilenet_v3_small_minimalistic': mobilenet_v3.small_minimalistic,
+    'mobilenet_v3_large_minimalistic': mobilenet_v3.large_minimalistic,
+    'mobilenet_edgetpu': mobilenet_v3.edge_tpu,
+    'mobilenet_edgetpu_075': mobilenet_v3.edge_tpu_075,
     'nasnet_cifar': nasnet.build_nasnet_cifar,
     'nasnet_mobile': nasnet.build_nasnet_mobile,
     'nasnet_large': nasnet.build_nasnet_large,
     'pnasnet_large': pnasnet.build_pnasnet_large,
     'pnasnet_mobile': pnasnet.build_pnasnet_mobile,
 }
-arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope,
+arg_scopes_map = {
+    'alexnet_v2': alexnet.alexnet_v2_arg_scope,
     'cifarnet': cifarnet.cifarnet_arg_scope,
     'overfeat': overfeat.overfeat_arg_scope,
     'vgg_a': vgg.vgg_arg_scope,
@@ -84,8 +93,7 @@ arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope,
     'inception_v2': inception.inception_v3_arg_scope,
     'inception_v3': inception.inception_v3_arg_scope,
     'inception_v4': inception.inception_v4_arg_scope,
-    'inception_resnet_v2':
-        inception.inception_resnet_v2_arg_scope,
+    'inception_resnet_v2': inception.inception_resnet_v2_arg_scope,
     'i3d': i3d.i3d_arg_scope,
     's3dg': s3dg.s3dg_arg_scope,
     'lenet': lenet.lenet_arg_scope,
@@ -104,12 +112,18 @@ arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope,
     'mobilenet_v2': mobilenet_v2.training_scope,
     'mobilenet_v2_035': mobilenet_v2.training_scope,
     'mobilenet_v2_140': mobilenet_v2.training_scope,
+    'mobilenet_v3_small': mobilenet_v3.training_scope,
+    'mobilenet_v3_large': mobilenet_v3.training_scope,
+    'mobilenet_v3_small_minimalistic': mobilenet_v3.training_scope,
+    'mobilenet_v3_large_minimalistic': mobilenet_v3.training_scope,
+    'mobilenet_edgetpu': mobilenet_v3.training_scope,
+    'mobilenet_edgetpu_075': mobilenet_v3.training_scope,
     'nasnet_cifar': nasnet.nasnet_cifar_arg_scope,
     'nasnet_mobile': nasnet.nasnet_mobile_arg_scope,
     'nasnet_large': nasnet.nasnet_large_arg_scope,
     'pnasnet_large': pnasnet.pnasnet_large_arg_scope,
     'pnasnet_mobile': pnasnet.pnasnet_mobile_arg_scope,
 }
 def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
...
@@ -36,8 +36,7 @@ class NetworksTest(tf.test.TestCase):
       # Most networks use 224 as their default_image_size
       image_size = getattr(net_fn, 'default_image_size', 224)
       if net not in ['i3d', 's3dg']:
-        inputs = tf.random_uniform(
-            (batch_size, image_size, image_size, 3))
+        inputs = tf.random.uniform((batch_size, image_size, image_size, 3))
         logits, end_points = net_fn(inputs)
         self.assertTrue(isinstance(logits, tf.Tensor))
         self.assertTrue(isinstance(end_points, dict))
@@ -53,8 +52,7 @@ class NetworksTest(tf.test.TestCase):
       # Most networks use 224 as their default_image_size
       image_size = getattr(net_fn, 'default_image_size', 224)
       if net not in ['i3d', 's3dg']:
-        inputs = tf.random_uniform(
-            (batch_size, image_size, image_size, 3))
+        inputs = tf.random.uniform((batch_size, image_size, image_size, 3))
         logits, end_points = net_fn(inputs)
         self.assertTrue(isinstance(logits, tf.Tensor))
         self.assertTrue(isinstance(end_points, dict))
@@ -69,8 +67,7 @@ class NetworksTest(tf.test.TestCase):
       net_fn = nets_factory.get_network_fn(net, num_classes=num_classes)
       # Most networks use 224 as their default_image_size
       image_size = getattr(net_fn, 'default_image_size', 224) // 2
-      inputs = tf.random_uniform(
-          (batch_size, 10, image_size, image_size, 3))
+      inputs = tf.random.uniform((batch_size, 10, image_size, image_size, 3))
       logits, end_points = net_fn(inputs)
       self.assertTrue(isinstance(logits, tf.Tensor))
       self.assertTrue(isinstance(end_points, dict))
...
@@ -35,14 +35,17 @@ import tensorflow as tf
 from tensorflow.contrib import slim as contrib_slim
 slim = contrib_slim
-trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
+# pylint: disable=g-long-lambda
+trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
+    0.0, stddev)
 def overfeat_arg_scope(weight_decay=0.0005):
   with slim.arg_scope([slim.conv2d, slim.fully_connected],
                       activation_fn=tf.nn.relu,
                       weights_regularizer=slim.l2_regularizer(weight_decay),
-                      biases_initializer=tf.zeros_initializer()):
+                      biases_initializer=tf.compat.v1.zeros_initializer()):
     with slim.arg_scope([slim.conv2d], padding='SAME'):
       with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
         return arg_sc
@@ -88,7 +91,7 @@ def overfeat(inputs,
       None).
     end_points: a dict of tensors with intermediate activations.
   """
-  with tf.variable_scope(scope, 'overfeat', [inputs]) as sc:
+  with tf.compat.v1.variable_scope(scope, 'overfeat', [inputs]) as sc:
     end_points_collection = sc.original_name_scope + '_end_points'
     # Collect outputs for conv2d, fully_connected and max_pool2d
     with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
@@ -104,9 +107,10 @@ def overfeat(inputs,
       net = slim.max_pool2d(net, [2, 2], scope='pool5')
       # Use conv2d instead of fully_connected layers.
-      with slim.arg_scope([slim.conv2d],
+      with slim.arg_scope(
+          [slim.conv2d],
           weights_initializer=trunc_normal(0.005),
-          biases_initializer=tf.constant_initializer(0.1)):
+          biases_initializer=tf.compat.v1.constant_initializer(0.1)):
         net = slim.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
         net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                            scope='dropout6')
@@ -115,15 +119,18 @@ def overfeat(inputs,
       end_points = slim.utils.convert_collection_to_dict(
           end_points_collection)
       if global_pool:
-        net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
+        net = tf.reduce_mean(
+            input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
         end_points['global_pool'] = net
       if num_classes:
         net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                            scope='dropout7')
-        net = slim.conv2d(net, num_classes, [1, 1],
+        net = slim.conv2d(
+            net,
+            num_classes, [1, 1],
             activation_fn=None,
             normalizer_fn=None,
-            biases_initializer=tf.zeros_initializer(),
+            biases_initializer=tf.compat.v1.zeros_initializer(),
             scope='fc8')
         if spatial_squeeze:
           net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
...
@@ -32,7 +32,7 @@ class OverFeatTest(tf.test.TestCase):
     height, width = 231, 231
     num_classes = 1000
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       logits, _ = overfeat.overfeat(inputs, num_classes)
       self.assertEquals(logits.op.name, 'overfeat/fc8/squeezed')
       self.assertListEqual(logits.get_shape().as_list(),
@@ -43,7 +43,7 @@ class OverFeatTest(tf.test.TestCase):
     height, width = 281, 281
     num_classes = 1000
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False)
       self.assertEquals(logits.op.name, 'overfeat/fc8/BiasAdd')
       self.assertListEqual(logits.get_shape().as_list(),
@@ -54,7 +54,7 @@ class OverFeatTest(tf.test.TestCase):
     height, width = 281, 281
     num_classes = 1000
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False,
                                     global_pool=True)
       self.assertEquals(logits.op.name, 'overfeat/fc8/BiasAdd')
@@ -66,7 +66,7 @@ class OverFeatTest(tf.test.TestCase):
     height, width = 231, 231
     num_classes = 1000
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       _, end_points = overfeat.overfeat(inputs, num_classes)
       expected_names = ['overfeat/conv1',
                         'overfeat/pool1',
@@ -87,7 +87,7 @@ class OverFeatTest(tf.test.TestCase):
     height, width = 231, 231
     num_classes = None
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       net, end_points = overfeat.overfeat(inputs, num_classes)
       expected_names = ['overfeat/conv1',
                         'overfeat/pool1',
@@ -108,7 +108,7 @@ class OverFeatTest(tf.test.TestCase):
     height, width = 231, 231
     num_classes = 1000
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       overfeat.overfeat(inputs, num_classes)
       expected_names = ['overfeat/conv1/weights',
                         'overfeat/conv1/biases',
@@ -135,11 +135,11 @@ class OverFeatTest(tf.test.TestCase):
     height, width = 231, 231
     num_classes = 1000
     with self.test_session():
-      eval_inputs = tf.random_uniform((batch_size, height, width, 3))
+      eval_inputs = tf.random.uniform((batch_size, height, width, 3))
       logits, _ = overfeat.overfeat(eval_inputs, is_training=False)
       self.assertListEqual(logits.get_shape().as_list(),
                            [batch_size, num_classes])
-      predictions = tf.argmax(logits, 1)
+      predictions = tf.argmax(input=logits, axis=1)
       self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
   def testTrainEvalWithReuse(self):
@@ -149,29 +149,29 @@ class OverFeatTest(tf.test.TestCase):
     eval_height, eval_width = 281, 281
     num_classes = 1000
     with self.test_session():
-      train_inputs = tf.random_uniform(
+      train_inputs = tf.random.uniform(
          (train_batch_size, train_height, train_width, 3))
       logits, _ = overfeat.overfeat(train_inputs)
       self.assertListEqual(logits.get_shape().as_list(),
                            [train_batch_size, num_classes])
-      tf.get_variable_scope().reuse_variables()
-      eval_inputs = tf.random_uniform(
+      tf.compat.v1.get_variable_scope().reuse_variables()
+      eval_inputs = tf.random.uniform(
          (eval_batch_size, eval_height, eval_width, 3))
       logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
                                     spatial_squeeze=False)
       self.assertListEqual(logits.get_shape().as_list(),
                            [eval_batch_size, 2, 2, num_classes])
-      logits = tf.reduce_mean(logits, [1, 2])
-      predictions = tf.argmax(logits, 1)
+      logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
+      predictions = tf.argmax(input=logits, axis=1)
       self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
   def testForward(self):
     batch_size = 1
     height, width = 231, 231
     with self.test_session() as sess:
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       logits, _ = overfeat.overfeat(inputs)
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
       output = sess.run(logits)
       self.assertTrue(output.any())
...
@@ -58,7 +58,8 @@ def pix2pix_arg_scope():
       [layers.conv2d, layers.conv2d_transpose],
       normalizer_fn=layers.instance_norm,
       normalizer_params=instance_norm_params,
-      weights_initializer=tf.random_normal_initializer(0, 0.02)) as sc:
+      weights_initializer=tf.compat.v1.random_normal_initializer(0,
+                                                                  0.02)) as sc:
     return sc
@@ -80,13 +81,14 @@ def upsample(net, num_outputs, kernel_size, method='nn_upsample_conv'):
   Raises:
     ValueError: if `method` is not recognized.
   """
-  net_shape = tf.shape(net)
+  net_shape = tf.shape(input=net)
   height = net_shape[1]
   width = net_shape[2]
   if method == 'nn_upsample_conv':
-    net = tf.image.resize_nearest_neighbor(
-        net, [kernel_size[0] * height, kernel_size[1] * width])
+    net = tf.image.resize(
+        net, [kernel_size[0] * height, kernel_size[1] * width],
+        method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
     net = layers.conv2d(net, num_outputs, [4, 4], activation_fn=None)
   elif method == 'conv2d_transpose':
     net = layers.conv2d_transpose(
@@ -166,7 +168,7 @@ def pix2pix_generator(net,
   ###########
   # Encoder #
   ###########
-  with tf.variable_scope('encoder'):
+  with tf.compat.v1.variable_scope('encoder'):
     with contrib_framework.arg_scope([layers.conv2d],
                                      kernel_size=[4, 4],
                                      stride=2,
@@ -194,7 +196,7 @@ def pix2pix_generator(net,
   reversed_blocks = list(blocks)
   reversed_blocks.reverse()
-  with tf.variable_scope('decoder'):
+  with tf.compat.v1.variable_scope('decoder'):
     # Dropout is used at both train and test time as per 'Image-to-Image',
     # Section 2.1 (last paragraph).
     with contrib_framework.arg_scope([layers.dropout], is_training=True):
@@ -210,7 +212,7 @@ def pix2pix_generator(net,
           net = layers.dropout(net, keep_prob=block.decoder_keep_prob)
         end_points['decoder%d' % block_id] = net
-  with tf.variable_scope('output'):
+  with tf.compat.v1.variable_scope('output'):
     # Explicitly set the normalizer_fn to None to override any default value
     # that may come from an arg_scope, such as pix2pix_arg_scope.
     logits = layers.conv2d(
@@ -249,11 +251,11 @@ def pix2pix_discriminator(net, num_filters, padding=2, pad_mode='REFLECT',
   def padded(net, scope):
     if padding:
-      with tf.variable_scope(scope):
+      with tf.compat.v1.variable_scope(scope):
         spatial_pad = tf.constant(
             [[0, 0], [padding, padding], [padding, padding], [0, 0]],
            dtype=tf.int32)
-        return tf.pad(net, spatial_pad, pad_mode)
+        return tf.pad(tensor=net, paddings=spatial_pad, mode=pad_mode)
     else:
       return net
...
@@ -42,7 +42,7 @@ class GeneratorTest(tf.test.TestCase):
           upsample_method='nn_upsample_conv')
     with self.test_session() as session:
-      session.run(tf.global_variables_initializer())
+      session.run(tf.compat.v1.global_variables_initializer())
       np_outputs = session.run(logits)
       self.assertListEqual([batch_size, height, width, num_outputs],
                            list(np_outputs.shape))
@@ -59,7 +59,7 @@ class GeneratorTest(tf.test.TestCase):
           upsample_method='conv2d_transpose')
     with self.test_session() as session:
-      session.run(tf.global_variables_initializer())
+      session.run(tf.compat.v1.global_variables_initializer())
       np_outputs = session.run(logits)
       self.assertListEqual([batch_size, height, width, num_outputs],
                            list(np_outputs.shape))
...
This diff is collapsed.
@@ -117,8 +117,9 @@ def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
     pad_total = kernel_size_effective - 1
     pad_beg = pad_total // 2
     pad_end = pad_total - pad_beg
-    inputs = tf.pad(inputs,
-                    [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
+    inputs = tf.pad(
+        tensor=inputs,
+        paddings=[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
     return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
                        rate=rate, padding='VALID', scope=scope)
@@ -180,7 +181,7 @@ def stack_blocks_dense(net, blocks, output_stride=None,
   rate = 1
   for block in blocks:
-    with tf.variable_scope(block.scope, 'block', [net]) as sc:
+    with tf.compat.v1.variable_scope(block.scope, 'block', [net]) as sc:
       block_stride = 1
       for i, unit in enumerate(block.args):
         if store_non_strided_activations and i == len(block.args) - 1:
@@ -188,7 +189,7 @@ def stack_blocks_dense(net, blocks, output_stride=None,
           block_stride = unit.get('stride', 1)
           unit = dict(unit, stride=1)
-        with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
+        with tf.compat.v1.variable_scope('unit_%d' % (i + 1), values=[net]):
           # If we have reached the target output_stride, then we need to employ
           # atrous convolution with stride=1 and multiply the atrous rate by the
           # current unit's stride for use in subsequent layers.
@@ -220,13 +221,14 @@ def stack_blocks_dense(net, blocks, output_stride=None,
   return net
-def resnet_arg_scope(weight_decay=0.0001,
+def resnet_arg_scope(
+    weight_decay=0.0001,
     batch_norm_decay=0.997,
     batch_norm_epsilon=1e-5,
     batch_norm_scale=True,
     activation_fn=tf.nn.relu,
     use_batch_norm=True,
-    batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
+    batch_norm_updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS):
   """Defines the default ResNet arg scope.
   TODO(gpapan): The batch-normalization related default values above are
...
@@ -109,7 +109,7 @@ def bottleneck(inputs,
   Returns:
     The ResNet unit's output.
   """
-  with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
+  with tf.compat.v1.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
     depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
     if depth == depth_in:
       shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
@@ -219,7 +219,8 @@ def resnet_v1(inputs,
   Raises:
     ValueError: If the target output_stride is not valid.
   """
-  with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
+  with tf.compat.v1.variable_scope(
+      scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
     end_points_collection = sc.original_name_scope + '_end_points'
     with slim.arg_scope([slim.conv2d, bottleneck,
                          resnet_utils.stack_blocks_dense],
@@ -242,7 +243,8 @@ def resnet_v1(inputs,
         if global_pool:
           # Global average pooling.
-          net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
+          net = tf.reduce_mean(
+              input_tensor=net, axis=[1, 2], name='pool5', keepdims=True)
           end_points['global_pool'] = net
         if num_classes:
           net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
...
This diff is collapsed.
@@ -84,7 +84,7 @@ def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
   Returns:
     The ResNet unit's output.
   """
-  with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
+  with tf.compat.v1.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
     depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
     preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
     if depth == depth_in:
@@ -181,7 +181,8 @@ def resnet_v2(inputs,
   Raises:
     ValueError: If the target output_stride is not valid.
   """
-  with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
+  with tf.compat.v1.variable_scope(
+      scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
     end_points_collection = sc.original_name_scope + '_end_points'
     with slim.arg_scope([slim.conv2d, bottleneck,
                          resnet_utils.stack_blocks_dense],
@@ -211,7 +212,8 @@ def resnet_v2(inputs,
         if global_pool:
           # Global average pooling.
-          net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
+          net = tf.reduce_mean(
+              input_tensor=net, axis=[1, 2], name='pool5', keepdims=True)
           end_points['global_pool'] = net
         if num_classes:
           net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
...
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
@@ -56,6 +56,12 @@ def get_preprocessing(name, is_training=False, use_grayscale=False):
       'mobilenet_v1': inception_preprocessing,
       'mobilenet_v2': inception_preprocessing,
       'mobilenet_v2_035': inception_preprocessing,
+      'mobilenet_v3_small': inception_preprocessing,
+      'mobilenet_v3_large': inception_preprocessing,
+      'mobilenet_v3_small_minimalistic': inception_preprocessing,
+      'mobilenet_v3_large_minimalistic': inception_preprocessing,
+      'mobilenet_edgetpu': inception_preprocessing,
+      'mobilenet_edgetpu_075': inception_preprocessing,
       'mobilenet_v2_140': inception_preprocessing,
       'nasnet_mobile': inception_preprocessing,
       'nasnet_large': inception_preprocessing,
...
This diff is collapsed.