Unverified Commit 451906e4 authored by pkulzc, committed by GitHub

Release MobileDet code and model, and require tf_slim installation for OD API. (#8562)
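Note for anyone syncing this change: tf_slim is distributed as a standalone PyPI package (`pip install tf-slim`, imported as `tf_slim`), so the Object Detection API now needs it installed alongside TensorFlow. A minimal sanity check, assuming a standard install (not part of this commit):

```python
# Sanity-check sketch: verify the standalone tf_slim package is importable.
import tensorflow.compat.v1 as tf
import tf_slim as slim

print(tf.__version__)
print(slim.arg_scope)  # tf_slim replaces tf.contrib.slim/layers/framework
```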



* Merged commit includes the following changes:
311933687  by Sergio Guadarrama:

    Removes spurious use of tf.compat.v2, which results in spurious tf.compat.v1.compat.v2. Adds a basic test to nasnet_utils.
    Replaces all remaining `import tensorflow as tf` with `import tensorflow.compat.v1 as tf`.
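    The pattern, as a minimal sketch (illustrative file, not one of the files touched here):

```python
# Before: top-level tensorflow, whose default behavior changed in TF 2.x.
# import tensorflow as tf

# After: alias the v1 compatibility module so TF1-style code keeps working.
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # pin graph-mode semantics under a TF 2.x install

# TF1 symbols now resolve through the alias unchanged:
inputs = tf.placeholder(tf.float32, (5, 10, 20, 10))
```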

--
311766063  by Sergio Guadarrama:

    Removes explicit tf.compat.v1 from all call sites (we already import tf.compat.v1, so this code was effectively doing tf.compat.v1.compat.v1). The existing code worked in the latest TensorFlow releases (2.2 and 1.15) but not in 1.14 or 2.0.0a; this CL fixes it.
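    An illustration of the pitfall at a hypothetical call site:

```python
import tensorflow.compat.v1 as tf

# tf is already tensorflow.compat.v1 here, so spelling the prefix out again
# resolves to tensorflow.compat.v1.compat.v1.variable_scope. That nested
# path happens to exist in 1.15 and 2.2 but not in 1.14 or 2.0.0a.
# with tf.compat.v1.variable_scope('final_layer'):  # fragile
with tf.variable_scope('final_layer'):  # portable across those releases
  pass
```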

--
311624958  by Sergio Guadarrama:

    Updates a README that doesn't render properly in GitHub documentation.

--
310980959  by Sergio Guadarrama:

    Moves research_models/slim off tf.contrib.slim/layers/framework to tf_slim
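    The replacement mapping applied throughout the diff below, as a sketch:

```python
# Old tf.contrib imports (removed in TF 2.x):
# from tensorflow.contrib import slim as contrib_slim
# from tensorflow.contrib import framework as contrib_framework
# from tensorflow.contrib import layers as contrib_layers

# New standalone package covering all three roles:
import tf_slim as slim

arg_scope = slim.arg_scope    # was contrib_framework.arg_scope
conv2d = slim.conv2d          # was contrib_layers.conv2d / layers.conv2d
batch_norm = slim.batch_norm  # was contrib_layers.batch_norm
```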

--
310263156  by Sergio Guadarrama:

    Adds model breakdown for MobilenetV3

--
308640516  by Sergio Guadarrama:

    Internal change

308244396  by Sergio Guadarrama:

    GroupNormalization support for MobilenetV3.
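    A minimal usage sketch, assuming tf_slim exports a `group_norm` layer at the top level the same way it exports the `slim.instance_norm` used elsewhere in this diff (the actual MobilenetV3 wiring is not shown here):

```python
import tensorflow.compat.v1 as tf
import tf_slim as slim

tf.disable_v2_behavior()

# Hypothetical stand-alone example; `groups` follows tf_slim's group_norm API.
inputs = tf.placeholder(tf.float32, (1, 56, 56, 32))
net = slim.conv2d(inputs, 32, [3, 3],
                  normalizer_fn=slim.group_norm,
                  normalizer_params={'groups': 8})
```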

--
307475800  by Sergio Guadarrama:

    Internal change

--
302077708  by Sergio Guadarrama:

    Remove `disable_tf2` behavior from slim py_library targets

--
301208453  by Sergio Guadarrama:

    Automated refactoring to make code Python 3 compatible.
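    Typical mechanical changes this refactoring makes, as seen later in the diff (e.g., the s3dg tests):

```python
import six

endpoint_shapes = {'Mixed_5b': [5, 8, 7, 7, 832]}

# Python 2-only:  endpoint_shapes.iteritems()
# Python 2/3 compatible version used in the refactored tests:
for name, shape in six.iteritems(endpoint_shapes):
  print(name, shape)

# dict.keys() is a view in Python 3, so it is wrapped where a list is needed:
names = list(endpoint_shapes.keys())
```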

--
300816672  by Sergio Guadarrama:

    Internal change

299433840  by Sergio Guadarrama:

    Internal change

299221609  by Sergio Guadarrama:

    Explicitly disable Tensorflow v2 behaviors for all TF1.x binaries and tests
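    For example (a sketch; the ResNet tests in this diff use the narrower `tf.disable_resource_variables()`):

```python
import tensorflow.compat.v1 as tf

# Opt a TF1.x binary or test out of all TF 2.x behaviors (eager execution,
# resource variables, etc.) even when TF 2.x is installed.
tf.disable_v2_behavior()
```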

--
299179617  by Sergio Guadarrama:

    Internal change

299040784  by Sergio Guadarrama:

    Internal change

299036699  by Sergio Guadarrama:

    Internal change

298736510  by Sergio Guadarrama:

    Internal change

298732599  by Sergio Guadarrama:

    Internal change

298729507  by Sergio Guadarrama:

    Internal change

298253328  by Sergio Guadarrama:

    Internal change

297788346  by Sergio Guadarrama:

    Internal change

297785278  by Sergio Guadarrama:

    Internal change

297783127  by Sergio Guadarrama:

    Internal change

297725870  by Sergio Guadarrama:

    Internal change

297721811  by Sergio Guadarrama:

    Internal change

297711347  by Sergio Guadarrama:

    Internal change

297708059  by Sergio Guadarrama:

    Internal change

297701831  by Sergio Guadarrama:

    Internal change

297700038  by Sergio Guadarrama:

    Internal change

297670468  by Sergio Guadarrama:

    Internal change.

--
297350326  by Sergio Guadarrama:

    Explicitly replace "import tensorflow" with "tensorflow.compat.v1" for TF2.x migration

--
297201668  by Sergio Guadarrama:

    Explicitly replace "import tensorflow" with "tensorflow.compat.v1" for TF2.x migration

--
294483372  by Sergio Guadarrama:

    Internal change

PiperOrigin-RevId: 311933687

* Merged commit includes the following changes:
312578615  by Menglong Zhu:

    Modify the LSTM feature extractors to be Python 3 compatible.

--
311264357  by Menglong Zhu:

    Removes contrib.slim

--
308957207  by Menglong Zhu:

    Automated refactoring to make code Python 3 compatible.

--
306976470  by yongzhe:

    Internal change

306777559  by Menglong Zhu:

    Internal change

--
299232507  by lzyuan:

    Internal update.

--
299221735  by lzyuan:

    Add small epsilon on max_range for quantize_op to prevent range collapse.
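    The idea, as a minimal sketch (not the actual quantize_op code):

```python
import tensorflow.compat.v1 as tf

def widen_range(min_range, max_range, epsilon=1e-6):
  # If min_range == max_range, the quantization range collapses to a point
  # and the scale computation degenerates; nudging max_range keeps it open.
  return min_range, tf.maximum(max_range, min_range + epsilon)

min_r, max_r = widen_range(tf.constant(0.0), tf.constant(0.0))
```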

--

PiperOrigin-RevId: 312578615

* Merged commit includes the following changes:
310447280  by lzc:

    Internal changes.

--

PiperOrigin-RevId: 310447280
Co-authored-by: Sergio Guadarrama <sguada@google.com>
Co-authored-by: Menglong Zhu <menglong@google.com>
parent 73b5be67
......@@ -17,8 +17,7 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.compat.v1 as tf
from nets.nasnet import nasnet_utils
......@@ -51,12 +50,20 @@ class NasnetUtilsTest(tf.test.TestCase):
def testGlobalAvgPool(self):
data_formats = ['NHWC', 'NCHW']
inputs = tf.compat.v1.placeholder(tf.float32, (5, 10, 20, 10))
inputs = tf.placeholder(tf.float32, (5, 10, 20, 10))
for data_format in data_formats:
output = nasnet_utils.global_avg_pool(
inputs, data_format)
self.assertEqual(output.shape, [5, 10])
def test_factorized_reduction(self):
data_format = 'NHWC'
output_shape = (5, 10, 20, 16)
inputs = tf.placeholder(tf.float32, (5, 10, 20, 10))
output = nasnet_utils.factorized_reduction(
inputs, 16, stride=1, data_format=data_format)
self.assertSequenceEqual(output_shape, output.shape.as_list())
if __name__ == '__main__':
tf.test.main()
......@@ -22,16 +22,14 @@ from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import training as contrib_training
from nets.nasnet import nasnet
from nets.nasnet import nasnet_utils
arg_scope = contrib_framework.arg_scope
slim = contrib_slim
arg_scope = slim.arg_scope
def large_imagenet_config():
......@@ -147,7 +145,7 @@ def _build_pnasnet_base(images,
# pylint: enable=protected-access
# Final softmax layer
with tf.compat.v1.variable_scope('final_layer'):
with tf.variable_scope('final_layer'):
net = activation_fn(net)
net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or not num_classes:
......@@ -176,7 +174,7 @@ def build_pnasnet_large(images,
# pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.compat.v1.logging.info(
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
......@@ -225,7 +223,7 @@ def build_pnasnet_mobile(images,
# pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.compat.v1.logging.info(
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
......
......@@ -17,13 +17,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.nasnet import pnasnet
slim = contrib_slim
class PNASNetTest(tf.test.TestCase):
......@@ -32,7 +30,7 @@ class PNASNetTest(tf.test.TestCase):
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
auxlogits = end_points['AuxLogits']
......@@ -49,7 +47,7 @@ class PNASNetTest(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
auxlogits = end_points['AuxLogits']
......@@ -64,20 +62,20 @@ class PNASNetTest(tf.test.TestCase):
def testBuildNonExistingLayerLargeModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random.uniform((5, 331, 331, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
pnasnet.build_pnasnet_large(inputs, 1000)
vars_names = [x.op.name for x in tf.compat.v1.trainable_variables()]
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildNonExistingLayerMobileModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random.uniform((5, 224, 224, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
pnasnet.build_pnasnet_mobile(inputs, 1000)
vars_names = [x.op.name for x in tf.compat.v1.trainable_variables()]
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
......@@ -86,7 +84,7 @@ class PNASNetTest(tf.test.TestCase):
height, width = 331, 331
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
......@@ -99,7 +97,7 @@ class PNASNetTest(tf.test.TestCase):
height, width = 224, 224
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
net, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
......@@ -112,7 +110,7 @@ class PNASNetTest(tf.test.TestCase):
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
......@@ -138,7 +136,7 @@ class PNASNetTest(tf.test.TestCase):
self.assertEqual(len(end_points), 17)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.compat.v1.logging.info('Endpoint name: {}'.format(endpoint_name))
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
......@@ -149,7 +147,7 @@ class PNASNetTest(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
......@@ -173,7 +171,7 @@ class PNASNetTest(tf.test.TestCase):
self.assertEqual(len(end_points), 14)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.compat.v1.logging.info('Endpoint name: {}'.format(endpoint_name))
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
......@@ -184,9 +182,9 @@ class PNASNetTest(tf.test.TestCase):
height, width = 331, 331
num_classes = 1000
for use_aux_head in (True, False):
tf.compat.v1.reset_default_graph()
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
......@@ -199,9 +197,9 @@ class PNASNetTest(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
for use_aux_head in (True, False):
tf.compat.v1.reset_default_graph()
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
......@@ -214,7 +212,7 @@ class PNASNetTest(tf.test.TestCase):
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
......@@ -228,7 +226,7 @@ class PNASNetTest(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.compat.v1.train.create_global_step()
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
......@@ -242,14 +240,14 @@ class PNASNetTest(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
for use_bounded_activation in (True, False):
tf.compat.v1.reset_default_graph()
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_bounded_activation', use_bounded_activation)
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, _ = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
for node in tf.compat.v1.get_default_graph().as_graph_def().node:
for node in tf.get_default_graph().as_graph_def().node:
if node.op.startswith('Relu'):
self.assertEqual(node.op == 'Relu6', use_bounded_activation)
......
......@@ -18,7 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib import slim as contrib_slim
import tf_slim as slim
from nets import alexnet
from nets import cifarnet
......@@ -37,8 +37,6 @@ from nets.nasnet import nasnet
from nets.nasnet import pnasnet
slim = contrib_slim
networks_map = {
'alexnet_v2': alexnet.alexnet_v2,
'cifarnet': cifarnet.cifarnet,
......
......@@ -20,7 +20,7 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.compat.v1 as tf
from nets import nets_factory
......
......@@ -31,13 +31,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
......@@ -45,7 +43,7 @@ def overfeat_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.compat.v1.zeros_initializer()):
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME'):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
......@@ -91,7 +89,7 @@ def overfeat(inputs,
None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.compat.v1.variable_scope(scope, 'overfeat', [inputs]) as sc:
with tf.variable_scope(scope, 'overfeat', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
......@@ -110,7 +108,7 @@ def overfeat(inputs,
with slim.arg_scope(
[slim.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=tf.compat.v1.constant_initializer(0.1)):
biases_initializer=tf.constant_initializer(0.1)):
net = slim.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
......@@ -130,7 +128,7 @@ def overfeat(inputs,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.compat.v1.zeros_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
......
......@@ -17,13 +17,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import overfeat
slim = contrib_slim
class OverFeatTest(tf.test.TestCase):
......@@ -154,7 +152,7 @@ class OverFeatTest(tf.test.TestCase):
logits, _ = overfeat.overfeat(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
......@@ -171,7 +169,7 @@ class OverFeatTest(tf.test.TestCase):
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
......
......@@ -32,11 +32,8 @@ from __future__ import print_function
import collections
import functools
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
layers = contrib_layers
import tensorflow.compat.v1 as tf
import tf_slim as slim
def pix2pix_arg_scope():
......@@ -54,12 +51,11 @@ def pix2pix_arg_scope():
'epsilon': 0.00001,
}
with contrib_framework.arg_scope(
[layers.conv2d, layers.conv2d_transpose],
normalizer_fn=layers.instance_norm,
with slim.arg_scope(
[slim.conv2d, slim.conv2d_transpose],
normalizer_fn=slim.instance_norm,
normalizer_params=instance_norm_params,
weights_initializer=tf.compat.v1.random_normal_initializer(0,
0.02)) as sc:
weights_initializer=tf.random_normal_initializer(0, 0.02)) as sc:
return sc
......@@ -89,9 +85,9 @@ def upsample(net, num_outputs, kernel_size, method='nn_upsample_conv'):
net = tf.image.resize(
net, [kernel_size[0] * height, kernel_size[1] * width],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
net = layers.conv2d(net, num_outputs, [4, 4], activation_fn=None)
net = slim.conv2d(net, num_outputs, [4, 4], activation_fn=None)
elif method == 'conv2d_transpose':
net = layers.conv2d_transpose(
net = slim.conv2d_transpose(
net, num_outputs, [4, 4], stride=kernel_size, activation_fn=None)
else:
raise ValueError('Unknown method: [%s]' % method)
......@@ -168,23 +164,23 @@ def pix2pix_generator(net,
###########
# Encoder #
###########
with tf.compat.v1.variable_scope('encoder'):
with contrib_framework.arg_scope([layers.conv2d],
kernel_size=[4, 4],
stride=2,
activation_fn=tf.nn.leaky_relu):
with tf.variable_scope('encoder'):
with slim.arg_scope([slim.conv2d],
kernel_size=[4, 4],
stride=2,
activation_fn=tf.nn.leaky_relu):
for block_id, block in enumerate(blocks):
# No normalizer for the first encoder layers as per 'Image-to-Image',
# Section 5.1.1
if block_id == 0:
# First layer doesn't use normalizer_fn
net = layers.conv2d(net, block.num_filters, normalizer_fn=None)
net = slim.conv2d(net, block.num_filters, normalizer_fn=None)
elif block_id < len(blocks) - 1:
net = layers.conv2d(net, block.num_filters)
net = slim.conv2d(net, block.num_filters)
else:
# Last layer doesn't use activation_fn nor normalizer_fn
net = layers.conv2d(
net = slim.conv2d(
net, block.num_filters, activation_fn=None, normalizer_fn=None)
encoder_activations.append(net)
......@@ -196,10 +192,10 @@ def pix2pix_generator(net,
reversed_blocks = list(blocks)
reversed_blocks.reverse()
with tf.compat.v1.variable_scope('decoder'):
with tf.variable_scope('decoder'):
# Dropout is used at both train and test time as per 'Image-to-Image',
# Section 2.1 (last paragraph).
with contrib_framework.arg_scope([layers.dropout], is_training=True):
with slim.arg_scope([slim.dropout], is_training=True):
for block_id, block in enumerate(reversed_blocks):
if block_id > 0:
......@@ -209,13 +205,13 @@ def pix2pix_generator(net,
net = tf.nn.relu(net)
net = upsample_fn(net, block.num_filters, [2, 2])
if block.decoder_keep_prob > 0:
net = layers.dropout(net, keep_prob=block.decoder_keep_prob)
net = slim.dropout(net, keep_prob=block.decoder_keep_prob)
end_points['decoder%d' % block_id] = net
with tf.compat.v1.variable_scope('output'):
with tf.variable_scope('output'):
# Explicitly set the normalizer_fn to None to override any default value
# that may come from an arg_scope, such as pix2pix_arg_scope.
logits = layers.conv2d(
logits = slim.conv2d(
net, num_outputs, [4, 4], activation_fn=None, normalizer_fn=None)
logits = tf.reshape(logits, input_size)
......@@ -236,7 +232,7 @@ def pix2pix_discriminator(net, num_filters, padding=2, pad_mode='REFLECT',
list determines the number of layers in the discriminator.
padding: Amount of reflection padding applied before each convolution.
pad_mode: mode for tf.pad, one of "CONSTANT", "REFLECT", or "SYMMETRIC".
activation_fn: activation fn for layers.conv2d.
activation_fn: activation fn for slim.conv2d.
is_training: Whether or not the model is training or testing.
Returns:
......@@ -251,7 +247,7 @@ def pix2pix_discriminator(net, num_filters, padding=2, pad_mode='REFLECT',
def padded(net, scope):
if padding:
with tf.compat.v1.variable_scope(scope):
with tf.variable_scope(scope):
spatial_pad = tf.constant(
[[0, 0], [padding, padding], [padding, padding], [0, 0]],
dtype=tf.int32)
......@@ -259,25 +255,25 @@ def pix2pix_discriminator(net, num_filters, padding=2, pad_mode='REFLECT',
else:
return net
with contrib_framework.arg_scope([layers.conv2d],
kernel_size=[4, 4],
stride=2,
padding='valid',
activation_fn=activation_fn):
with slim.arg_scope([slim.conv2d],
kernel_size=[4, 4],
stride=2,
padding='valid',
activation_fn=activation_fn):
# No normalization on the input layer.
net = layers.conv2d(
net = slim.conv2d(
padded(net, 'conv0'), num_filters[0], normalizer_fn=None, scope='conv0')
end_points['conv0'] = net
for i in range(1, num_layers - 1):
net = layers.conv2d(
net = slim.conv2d(
padded(net, 'conv%d' % i), num_filters[i], scope='conv%d' % i)
end_points['conv%d' % i] = net
# Stride 1 on the last layer.
net = layers.conv2d(
net = slim.conv2d(
padded(net, 'conv%d' % (num_layers - 1)),
num_filters[-1],
stride=1,
......@@ -285,7 +281,7 @@ def pix2pix_discriminator(net, num_filters, padding=2, pad_mode='REFLECT',
end_points['conv%d' % (num_layers - 1)] = net
# 1-dim logits, stride 1, no activation, no normalization.
logits = layers.conv2d(
logits = slim.conv2d(
padded(net, 'conv%d' % num_layers),
1,
stride=1,
......
......@@ -18,8 +18,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import pix2pix
......@@ -36,13 +36,13 @@ class GeneratorTest(tf.test.TestCase):
num_outputs = 4
images = tf.ones((batch_size, height, width, 3))
with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, _ = pix2pix.pix2pix_generator(
images, num_outputs, blocks=self._reduced_default_blocks(),
upsample_method='nn_upsample_conv')
with self.test_session() as session:
session.run(tf.compat.v1.global_variables_initializer())
session.run(tf.global_variables_initializer())
np_outputs = session.run(logits)
self.assertListEqual([batch_size, height, width, num_outputs],
list(np_outputs.shape))
......@@ -53,13 +53,13 @@ class GeneratorTest(tf.test.TestCase):
num_outputs = 4
images = tf.ones((batch_size, height, width, 3))
with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, _ = pix2pix.pix2pix_generator(
images, num_outputs, blocks=self._reduced_default_blocks(),
upsample_method='conv2d_transpose')
with self.test_session() as session:
session.run(tf.compat.v1.global_variables_initializer())
session.run(tf.global_variables_initializer())
np_outputs = session.run(logits)
self.assertListEqual([batch_size, height, width, num_outputs],
list(np_outputs.shape))
......@@ -74,7 +74,7 @@ class GeneratorTest(tf.test.TestCase):
pix2pix.Block(64, 0.5),
pix2pix.Block(128, 0),
]
with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
_, end_points = pix2pix.pix2pix_generator(
images, num_outputs, blocks)
......@@ -106,7 +106,7 @@ class DiscriminatorTest(tf.test.TestCase):
output_size = self._layer_output_size(output_size, stride=1)
images = tf.ones((batch_size, input_size, input_size, 3))
with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, end_points = pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512])
self.assertListEqual([batch_size, output_size, output_size, 1],
......@@ -125,7 +125,7 @@ class DiscriminatorTest(tf.test.TestCase):
output_size = self._layer_output_size(output_size, stride=1, pad=0)
images = tf.ones((batch_size, input_size, input_size, 3))
with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, end_points = pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512], padding=0)
self.assertListEqual([batch_size, output_size, output_size, 1],
......@@ -138,7 +138,7 @@ class DiscriminatorTest(tf.test.TestCase):
input_size = 256
images = tf.ones((batch_size, input_size, input_size, 3))
with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
with self.assertRaises(TypeError):
pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512], padding=1.5)
......@@ -148,7 +148,7 @@ class DiscriminatorTest(tf.test.TestCase):
input_size = 256
images = tf.ones((batch_size, input_size, input_size, 3))
with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
with self.assertRaises(ValueError):
pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512], padding=-1)
......
......@@ -21,7 +21,7 @@ from __future__ import print_function
import functools
from absl import app
from absl import flags
import tensorflow as tf
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from nets import nets_factory
from preprocessing import preprocessing_factory
......@@ -127,7 +127,7 @@ def _representative_dataset_gen():
dataset = tfds.builder(FLAGS.dataset_name, data_dir=FLAGS.dataset_dir)
dataset.download_and_prepare()
data = dataset.as_dataset()[FLAGS.dataset_split]
iterator = tf.compat.v1.data.make_one_shot_iterator(data)
iterator = tf.data.make_one_shot_iterator(data)
if FLAGS.use_model_specific_preprocessing:
preprocess_fn = functools.partial(
preprocessing_factory.get_preprocessing(name=FLAGS.model_name),
......
......@@ -38,10 +38,8 @@ from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
......@@ -181,7 +179,7 @@ def stack_blocks_dense(net, blocks, output_stride=None,
rate = 1
for block in blocks:
with tf.compat.v1.variable_scope(block.scope, 'block', [net]) as sc:
with tf.variable_scope(block.scope, 'block', [net]) as sc:
block_stride = 1
for i, unit in enumerate(block.args):
if store_non_strided_activations and i == len(block.args) - 1:
......@@ -189,7 +187,7 @@ def stack_blocks_dense(net, blocks, output_stride=None,
block_stride = unit.get('stride', 1)
unit = dict(unit, stride=1)
with tf.compat.v1.variable_scope('unit_%d' % (i + 1), values=[net]):
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
......@@ -228,7 +226,7 @@ def resnet_arg_scope(
batch_norm_scale=True,
activation_fn=tf.nn.relu,
use_batch_norm=True,
batch_norm_updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS):
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
......
......@@ -34,7 +34,7 @@ units.
Typical use:
from tensorflow.contrib.slim.nets import resnet_v1
from tf_slim.nets import resnet_v1
ResNet-101 for image classification into 1000 classes:
......@@ -56,14 +56,13 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import resnet_utils
resnet_arg_scope = resnet_utils.resnet_arg_scope
slim = contrib_slim
class NoOpScope(object):
......@@ -109,7 +108,7 @@ def bottleneck(inputs,
Returns:
The ResNet unit's output.
"""
with tf.compat.v1.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
......@@ -219,7 +218,7 @@ def resnet_v1(inputs,
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.compat.v1.variable_scope(
with tf.variable_scope(
scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
......
......@@ -19,15 +19,13 @@ from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import resnet_utils
from nets import resnet_v1
slim = contrib_slim
tf.compat.v1.disable_resource_variables()
tf.disable_resource_variables()
def create_test_input(batch_size, height, width, channels):
......@@ -45,8 +43,7 @@ def create_test_input(batch_size, height, width, channels):
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return tf.compat.v1.placeholder(tf.float32,
(batch_size, height, width, channels))
return tf.placeholder(tf.float32, (batch_size, height, width, channels))
else:
return tf.cast(
np.tile(
......@@ -83,9 +80,9 @@ class ResnetUtilsTest(tf.test.TestCase):
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.compat.v1.get_variable('Conv/weights', initializer=w)
tf.compat.v1.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.cast([[14, 28, 43, 26], [28, 48, 66, 37], [43, 66, 84, 46],
......@@ -105,7 +102,7 @@ class ResnetUtilsTest(tf.test.TestCase):
y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
......@@ -121,9 +118,9 @@ class ResnetUtilsTest(tf.test.TestCase):
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.compat.v1.get_variable('Conv/weights', initializer=w)
tf.compat.v1.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.cast(
......@@ -144,7 +141,7 @@ class ResnetUtilsTest(tf.test.TestCase):
y4_expected = y2_expected
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
......@@ -152,7 +149,7 @@ class ResnetUtilsTest(tf.test.TestCase):
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with tf.compat.v1.variable_scope(scope, values=[inputs]):
with tf.variable_scope(scope, values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = slim.utils.convert_collection_to_dict('end_points')
......@@ -189,9 +186,9 @@ class ResnetUtilsTest(tf.test.TestCase):
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with tf.compat.v1.variable_scope(block.scope, 'block', [net]):
with tf.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
with tf.compat.v1.variable_scope('unit_%d' % (i + 1), values=[net]):
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net, rate=1, **unit)
return net
......@@ -219,7 +216,7 @@ class ResnetUtilsTest(tf.test.TestCase):
for output_stride in [1, 2, 4, 8, None]:
with tf.Graph().as_default():
with self.test_session() as sess:
tf.compat.v1.set_random_seed(0)
tf.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs,
......@@ -232,10 +229,10 @@ class ResnetUtilsTest(tf.test.TestCase):
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
......@@ -262,7 +259,7 @@ class ResnetUtilsTest(tf.test.TestCase):
for output_stride in [1, 2, 4, 8, None]:
with tf.Graph().as_default():
with self.test_session() as sess:
tf.compat.v1.set_random_seed(0)
tf.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Subsampling at the last unit of the block.
......@@ -274,7 +271,7 @@ class ResnetUtilsTest(tf.test.TestCase):
'output')
# Make the two networks use the same weights.
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable_scope().reuse_variables()
# Subsample activations at the end of the blocks.
expected = resnet_utils.stack_blocks_dense(
......@@ -284,7 +281,7 @@ class ResnetUtilsTest(tf.test.TestCase):
expected_end_points = slim.utils.convert_collection_to_dict(
'expected')
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
# Make sure that the final output is the same.
output, expected = sess.run([output, expected])
......@@ -475,7 +472,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.compat.v1.set_random_seed(0)
tf.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(inputs, None, is_training=False,
......@@ -487,11 +484,11 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
......@@ -511,7 +508,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
......@@ -526,7 +523,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
......@@ -545,7 +542,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
......
......@@ -28,7 +28,7 @@ The key difference of the full preactivation 'v2' variant compared to the
Typical use:
from tensorflow.contrib.slim.nets import resnet_v2
from tf_slim.nets import resnet_v2
ResNet-101 for image classification into 1000 classes:
......@@ -50,12 +50,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import resnet_utils
slim = contrib_slim
resnet_arg_scope = resnet_utils.resnet_arg_scope
......@@ -84,7 +83,7 @@ def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
Returns:
The ResNet unit's output.
"""
with tf.compat.v1.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
if depth == depth_in:
......@@ -181,7 +180,7 @@ def resnet_v2(inputs,
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.compat.v1.variable_scope(
with tf.variable_scope(
scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
......
......@@ -19,15 +19,13 @@ from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import resnet_utils
from nets import resnet_v2
slim = contrib_slim
tf.compat.v1.disable_resource_variables()
tf.disable_resource_variables()
def create_test_input(batch_size, height, width, channels):
......@@ -45,8 +43,7 @@ def create_test_input(batch_size, height, width, channels):
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return tf.compat.v1.placeholder(tf.float32,
(batch_size, height, width, channels))
return tf.placeholder(tf.float32, (batch_size, height, width, channels))
else:
return tf.cast(
np.tile(
......@@ -83,9 +80,9 @@ class ResnetUtilsTest(tf.test.TestCase):
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.compat.v1.get_variable('Conv/weights', initializer=w)
tf.compat.v1.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.cast([[14, 28, 43, 26], [28, 48, 66, 37], [43, 66, 84, 46],
......@@ -105,7 +102,7 @@ class ResnetUtilsTest(tf.test.TestCase):
y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
......@@ -121,9 +118,9 @@ class ResnetUtilsTest(tf.test.TestCase):
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.compat.v1.get_variable('Conv/weights', initializer=w)
tf.compat.v1.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.cast(
......@@ -144,7 +141,7 @@ class ResnetUtilsTest(tf.test.TestCase):
y4_expected = y2_expected
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
......@@ -152,7 +149,7 @@ class ResnetUtilsTest(tf.test.TestCase):
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with tf.compat.v1.variable_scope(scope, values=[inputs]):
with tf.variable_scope(scope, values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = slim.utils.convert_collection_to_dict('end_points')
......@@ -189,9 +186,9 @@ class ResnetUtilsTest(tf.test.TestCase):
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with tf.compat.v1.variable_scope(block.scope, 'block', [net]):
with tf.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
with tf.compat.v1.variable_scope('unit_%d' % (i + 1), values=[net]):
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net, rate=1, **unit)
return net
......@@ -219,7 +216,7 @@ class ResnetUtilsTest(tf.test.TestCase):
for output_stride in [1, 2, 4, 8, None]:
with tf.Graph().as_default():
with self.test_session() as sess:
tf.compat.v1.set_random_seed(0)
tf.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs,
......@@ -232,10 +229,10 @@ class ResnetUtilsTest(tf.test.TestCase):
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
......@@ -392,7 +389,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.compat.v1.set_random_seed(0)
tf.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(inputs, None,
......@@ -405,12 +402,12 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(inputs, None,
is_training=False,
global_pool=False)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
......@@ -430,7 +427,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
......@@ -446,7 +443,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
......@@ -465,7 +462,7 @@ class ResnetCompleteNetworkTest(tf.test.TestCase):
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
......
......@@ -24,22 +24,19 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import i3d_utils
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
conv3d_spatiotemporal = i3d_utils.conv3d_spatiotemporal
inception_block_v1_3d = i3d_utils.inception_block_v1_3d
# Orignaly, arg_scope = slim.arg_scope and layers = slim, now switch to more
# update-to-date tf.contrib.* API.
arg_scope = contrib_framework.arg_scope
layers = contrib_layers
arg_scope = slim.arg_scope
def s3dg_arg_scope(weight_decay=1e-7,
......@@ -72,12 +69,11 @@ def s3dg_arg_scope(weight_decay=1e-7,
}
}
with arg_scope(
[layers.conv3d, conv3d_spatiotemporal],
weights_regularizer=layers.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([slim.conv3d, conv3d_spatiotemporal],
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([conv3d_spatiotemporal], separable=True) as sc:
return sc
......@@ -115,13 +111,13 @@ def self_gating(input_tensor, scope, data_format='NDHWC'):
h = input_shape[index_h]
num_channels = input_shape[index_c]
spatiotemporal_average = layers.avg_pool3d(
spatiotemporal_average = slim.avg_pool3d(
input_tensor, [t, w, h],
stride=1,
data_format=data_format,
scope=scope + '/self_gating/avg_pool3d')
weights = layers.conv3d(
weights = slim.conv3d(
spatiotemporal_average,
num_channels, [1, 1, 1],
activation_fn=None,
......@@ -211,13 +207,12 @@ def s3dg_base(inputs,
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with tf.compat.v1.variable_scope(scope, 'InceptionV1', [inputs]):
with arg_scope([layers.conv3d], weights_initializer=trunc_normal(0.01)):
with arg_scope(
[layers.conv3d, layers.max_pool3d, conv3d_spatiotemporal],
stride=1,
data_format=data_format,
padding='SAME'):
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with arg_scope([slim.conv3d], weights_initializer=trunc_normal(0.01)):
with arg_scope([slim.conv3d, slim.max_pool3d, conv3d_spatiotemporal],
stride=1,
data_format=data_format,
padding='SAME'):
# batch_size x 32 x 112 x 112 x 64
end_point = 'Conv2d_1a_7x7'
if first_temporal_kernel_size not in [1, 3, 5, 7]:
......@@ -235,14 +230,13 @@ def s3dg_base(inputs,
return net, end_points
# batch_size x 32 x 56 x 56 x 64
end_point = 'MaxPool_2a_3x3'
net = layers.max_pool3d(
net, [1, 3, 3], stride=[1, 2, 2], scope=end_point)
net = slim.max_pool3d(net, [1, 3, 3], stride=[1, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 64
end_point = 'Conv2d_2b_1x1'
net = layers.conv3d(net, depth(64), [1, 1, 1], scope=end_point)
net = slim.conv3d(net, depth(64), [1, 1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
......@@ -261,8 +255,7 @@ def s3dg_base(inputs,
return net, end_points
# batch_size x 32 x 28 x 28 x 192
end_point = 'MaxPool_3a_3x3'
net = layers.max_pool3d(
net, [1, 3, 3], stride=[1, 2, 2], scope=end_point)
net = slim.max_pool3d(net, [1, 3, 3], stride=[1, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
......@@ -313,8 +306,7 @@ def s3dg_base(inputs,
return net, end_points
end_point = 'MaxPool_4a_3x3'
net = layers.max_pool3d(
net, [3, 3, 3], stride=[2, 2, 2], scope=end_point)
net = slim.max_pool3d(net, [3, 3, 3], stride=[2, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
......@@ -435,8 +427,7 @@ def s3dg_base(inputs,
return net, end_points
end_point = 'MaxPool_5a_2x2'
net = layers.max_pool3d(
net, [2, 2, 2], stride=[2, 2, 2], scope=end_point)
net = slim.max_pool3d(net, [2, 2, 2], stride=[2, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
......@@ -499,7 +490,7 @@ def s3dg(inputs,
depth_multiplier=1.0,
dropout_keep_prob=0.8,
is_training=True,
prediction_fn=layers.softmax,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
data_format='NDHWC',
......@@ -558,10 +549,9 @@ def s3dg(inputs,
"""
assert data_format in ['NDHWC', 'NCDHW']
# Final pooling and prediction
with tf.compat.v1.variable_scope(
with tf.variable_scope(
scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
with arg_scope(
[layers.batch_norm, layers.dropout], is_training=is_training):
with arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
net, end_points = s3dg_base(
inputs,
first_temporal_kernel_size=first_temporal_kernel_size,
......@@ -572,18 +562,18 @@ def s3dg(inputs,
depth_multiplier=depth_multiplier,
data_format=data_format,
scope=scope)
with tf.compat.v1.variable_scope('Logits'):
with tf.variable_scope('Logits'):
if data_format.startswith('NC'):
net = tf.transpose(a=net, perm=[0, 2, 3, 4, 1])
kernel_size = i3d_utils.reduced_kernel_size_3d(net, [2, 7, 7])
net = layers.avg_pool3d(
net = slim.avg_pool3d(
net,
kernel_size,
stride=1,
data_format='NDHWC',
scope='AvgPool_0a_7x7')
net = layers.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = layers.conv3d(
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = slim.conv3d(
net,
num_classes, [1, 1, 1],
activation_fn=None,
......
......@@ -18,7 +18,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import six
import tensorflow.compat.v1 as tf
from nets import s3dg
......@@ -55,7 +56,7 @@ class S3DGTest(tf.test.TestCase):
'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c',
'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
self.assertItemsEqual(list(end_points.keys()), expected_endpoints)
def testBuildOnlyUptoFinalEndpointNoGating(self):
batch_size = 5
......@@ -101,8 +102,9 @@ class S3DGTest(tf.test.TestCase):
'Mixed_5b': [5, 8, 7, 7, 832],
'Mixed_5c': [5, 8, 7, 7, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.iteritems():
self.assertItemsEqual(
list(endpoints_shapes.keys()), list(end_points.keys()))
for endpoint_name, expected_shape in six.iteritems(endpoints_shapes):
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
......@@ -141,7 +143,7 @@ class S3DGTest(tf.test.TestCase):
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
......
......@@ -41,10 +41,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
def vgg_arg_scope(weight_decay=0.0005):
......@@ -59,7 +57,7 @@ def vgg_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.compat.v1.zeros_initializer()):
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
return arg_sc
......@@ -105,7 +103,7 @@ def vgg_a(inputs,
or the input to the logits layer (if num_classes is 0 or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.compat.v1.variable_scope(scope, 'vgg_a', [inputs], reuse=reuse) as sc:
with tf.variable_scope(scope, 'vgg_a', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
......@@ -187,7 +185,7 @@ def vgg_16(inputs,
or the input to the logits layer (if num_classes is 0 or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.compat.v1.variable_scope(
with tf.variable_scope(
scope, 'vgg_16', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
......@@ -271,7 +269,7 @@ def vgg_19(inputs,
None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.compat.v1.variable_scope(
with tf.variable_scope(
scope, 'vgg_19', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
......
......@@ -17,13 +17,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import vgg
slim = contrib_slim
class VGGATest(tf.test.TestCase):
......@@ -170,7 +168,7 @@ class VGGATest(tf.test.TestCase):
logits, _ = vgg.vgg_a(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
......@@ -187,7 +185,7 @@ class VGGATest(tf.test.TestCase):
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
......@@ -357,7 +355,7 @@ class VGG16Test(tf.test.TestCase):
logits, _ = vgg.vgg_16(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_16(eval_inputs, is_training=False,
......@@ -374,7 +372,7 @@ class VGG16Test(tf.test.TestCase):
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
......@@ -559,7 +557,7 @@ class VGG19Test(tf.test.TestCase):
logits, _ = vgg.vgg_19(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_19(eval_inputs, is_training=False,
......@@ -576,7 +574,7 @@ class VGG19Test(tf.test.TestCase):
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
......
......@@ -20,12 +20,10 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
_PADDING = 4
slim = contrib_slim
_PADDING = 4
def preprocess_for_train(image,
......