Unverified Commit 451906e4 authored by pkulzc, committed by GitHub

Release MobileDet code and model, and require tf_slim installation for OD API. (#8562)



* Merged commit includes the following changes:
311933687  by Sergio Guadarrama:

    Removes spurious use of tf.compat.v2, which results in spurious tf.compat.v1.compat.v2. Adds a basic test to nasnet_utils.
    Replaces all remaining "import tensorflow as tf" with "import tensorflow.compat.v1 as tf".
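
    The pattern, applied throughout the diff below, is a one-line import
    swap that leaves the tf.* call sites untouched (a minimal sketch; the
    initializer call is one the diff itself exercises):

        # old import (TF1 only; v1 symbols vanish from the top level in 2.x):
        #   import tensorflow as tf
        # new import (works on 1.15 and 2.x, keeps the v1 symbols):
        import tensorflow.compat.v1 as tf

        # Call sites need no compat prefix under the new import.
        init = tf.truncated_normal_initializer(0.0, 0.01)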

--
311766063  by Sergio Guadarrama:

    Removes explicit tf.compat.v1 at all call sites (we already import tf.compat.v1, so this code was effectively doing tf.compat.v1.compat.v1). The existing code worked in the latest TensorFlow versions (2.2 and 1.15) but not in 1.14 or 2.0.0a; this CL fixes it.
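
    A sketch of the failure mode: once a module does the v1 import, an
    explicit compat prefix at a call site doubles up.

        import tensorflow.compat.v1 as tf

        # tf.compat.v1.variable_scope now resolves to
        # tensorflow.compat.v1.compat.v1.variable_scope; that nested path
        # happens to exist on 1.15/2.2 but not on 1.14 or 2.0.0a.
        with tf.variable_scope('scope'):  # correct: the prefix is in the import
          pass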

--
311624958  by Sergio Guadarrama:

    Updates a README that doesn't render properly in GitHub documentation

--
310980959  by Sergio Guadarrama:

    Moves research_models/slim off tf.contrib.slim/layers/framework to tf_slim
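
    A minimal sketch of the replacement, assuming the standalone package
    is installed (pip install tf-slim); the conv2d call is illustrative:

        import tensorflow.compat.v1 as tf
        # replaces: from tensorflow.contrib import slim as contrib_slim
        import tf_slim as slim

        tf.disable_v2_behavior()  # slim layers are TF1 graph-mode code
        images = tf.placeholder(tf.float32, [None, 224, 224, 3])
        # tf_slim keeps the contrib.slim API surface.
        net = slim.conv2d(images, 64, [3, 3], scope='conv1')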

--
310263156  by Sergio Guadarrama:

    Adds model breakdown for MobilenetV3

--
308640516  by Sergio Guadarrama:

    Internal change

308244396  by Sergio Guadarrama:

    GroupNormalization support for MobilenetV3.
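
    In slim-style code, normalizer support typically amounts to passing
    a different normalizer_fn through the arg scope. A hedged sketch of
    what GroupNorm support looks like (the actual MobilenetV3 plumbing is
    not part of this diff, and the group count here is illustrative):

        import tensorflow.compat.v1 as tf
        import tf_slim as slim

        tf.disable_v2_behavior()
        inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
        # GroupNorm normalizes over channel groups, so it does not depend
        # on batch statistics and behaves identically in train and eval.
        with slim.arg_scope([slim.conv2d],
                            normalizer_fn=slim.group_norm,
                            normalizer_params={'groups': 8}):
          net = slim.conv2d(inputs, 32, [3, 3], scope='conv1')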

--
307475800  by Sergio Guadarrama:

    Internal change

--
302077708  by Sergio Guadarrama:

    Remove `disable_tf2` behavior from slim py_library targets

--
301208453  by Sergio Guadarrama:

    Automated refactoring to make code Python 3 compatible.
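
    The concrete fixes show up in the test diffs below; a representative
    pair of them:

        import six

        endpoints_shapes = {'Mixed_5b': [5, 8, 7, 7, 832]}

        # dict.iteritems() is gone in Python 3; six.iteritems works on both.
        for name, shape in six.iteritems(endpoints_shapes):
          print(name, shape)

        # dict.keys() returns a view in Python 3; wrap it in list() where a
        # plain sequence is needed (e.g. for assertItemsEqual).
        names = list(endpoints_shapes.keys())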

--
300816672  by Sergio Guadarrama:

    Internal change

299433840  by Sergio Guadarrama:

    Internal change

299221609  by Sergio Guadarrama:

    Explicitly disable TensorFlow v2 behaviors for all TF1.x binaries and tests
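
    For TF1.x binaries and tests this is a single call next to the import
    (a sketch; the internal call sites are not shown in this diff):

        import tensorflow.compat.v1 as tf

        # Keeps graph mode, v1 variable scoping, and collections semantics
        # even when the installed TensorFlow is 2.x.
        tf.disable_v2_behavior()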

--
299179617  by Sergio Guadarrama:

    Internal change

299040784  by Sergio Guadarrama:

    Internal change

299036699  by Sergio Guadarrama:

    Internal change

298736510  by Sergio Guadarrama:

    Internal change

298732599  by Sergio Guadarrama:

    Internal change

298729507  by Sergio Guadarrama:

    Internal change

298253328  by Sergio Guadarrama:

    Internal change

297788346  by Sergio Guadarrama:

    Internal change

297785278  by Sergio Guadarrama:

    Internal change

297783127  by Sergio Guadarrama:

    Internal change

297725870  by Sergio Guadarrama:

    Internal change

297721811  by Sergio Guadarrama:

    Internal change

297711347  by Sergio Guadarrama:

    Internal change

297708059  by Sergio Guadarrama:

    Internal change

297701831  by Sergio Guadarrama:

    Internal change

297700038  by Sergio Guadarrama:

    Internal change

297670468  by Sergio Guadarrama:

    Internal change.

--
297350326  by Sergio Guadarrama:

    Explicitly replace "import tensorflow" with "tensorflow.compat.v1" for TF2.x migration

--
297201668  by Sergio Guadarrama:

    Explicitly replace "import tensorflow" with "tensorflow.compat.v1" for TF2.x migration

--
294483372  by Sergio Guadarrama:

    Internal change

PiperOrigin-RevId: 311933687

* Merged commit includes the following changes:
312578615  by Menglong Zhu:

    Modify the LSTM feature extractors to be Python 3 compatible.

--
311264357  by Menglong Zhu:

    Removes contrib.slim

--
308957207  by Menglong Zhu:

    Automated refactoring to make code Python 3 compatible.

--
306976470  by yongzhe:

    Internal change

306777559  by Menglong Zhu:

    Internal change

--
299232507  by lzyuan:

    Internal update.

--
299221735  by lzyuan:

    Add a small epsilon to max_range in quantize_op to prevent range collapse.
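
    Presumably a guard of the following shape (a hedged sketch; the helper
    name and epsilon value are illustrative, not the committed code). If
    max_range equals min_range the quantization scale is zero and every
    value maps to a single bucket, so the upper bound is nudged up:

        import tensorflow.compat.v1 as tf

        def _safe_quant_range(min_range, max_range, epsilon=1e-6):
          # Keep the range strictly positive so scale = (max - min) / levels
          # never collapses to zero.
          return min_range, tf.maximum(max_range, min_range + epsilon)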

--

PiperOrigin-RevId: 312578615

* Merged commit includes the following changes:
310447280  by lzc:

    Internal changes.

--

PiperOrigin-RevId: 310447280
Co-authored-by: Sergio Guadarrama <sguada@google.com>
Co-authored-by: Menglong Zhu <menglong@google.com>
parent 73b5be67
......@@ -19,16 +19,15 @@ from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.contrib import slim as contrib_slim
from datasets import dataset_factory
from nets import nets_factory
from preprocessing import preprocessing_factory
slim = contrib_slim
tf.app.flags.DEFINE_integer(
'batch_size', 100, 'The number of samples in each batch.')
......
......@@ -57,17 +57,15 @@ from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.contrib import slim as contrib_slim
from tensorflow.python.platform import gfile
from datasets import dataset_factory
from nets import nets_factory
slim = contrib_slim
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to save.')
......
......@@ -22,7 +22,7 @@ from __future__ import print_function
import os
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.python.platform import gfile
import export_inference_graph
......
......@@ -36,20 +36,18 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
def alexnet_v2_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
biases_initializer=tf.compat.v1.constant_initializer(0.1),
biases_initializer=tf.constant_initializer(0.1),
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d], padding='SAME'):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
......@@ -97,7 +95,7 @@ def alexnet_v2(inputs,
or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.compat.v1.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
with tf.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
......@@ -116,7 +114,7 @@ def alexnet_v2(inputs,
with slim.arg_scope(
[slim.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=tf.compat.v1.constant_initializer(0.1)):
biases_initializer=tf.constant_initializer(0.1)):
net = slim.conv2d(net, 4096, [5, 5], padding='VALID',
scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
......@@ -137,7 +135,7 @@ def alexnet_v2(inputs,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.compat.v1.zeros_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
......
......@@ -17,13 +17,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import alexnet
slim = contrib_slim
class AlexnetV2Test(tf.test.TestCase):
......@@ -156,7 +154,7 @@ class AlexnetV2Test(tf.test.TestCase):
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.compat.v1.get_variable_scope().reuse_variables()
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
......@@ -173,7 +171,7 @@ class AlexnetV2Test(tf.test.TestCase):
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
......
......@@ -18,13 +18,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
stddev=stddev)
......@@ -63,7 +61,7 @@ def cifarnet(images, num_classes=10, is_training=False,
"""
end_points = {}
with tf.compat.v1.variable_scope(scope, 'CifarNet', [images]):
with tf.variable_scope(scope, 'CifarNet', [images]):
net = slim.conv2d(images, 64, [5, 5], scope='conv1')
end_points['conv1'] = net
net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
......@@ -87,7 +85,7 @@ def cifarnet(images, num_classes=10, is_training=False,
logits = slim.fully_connected(
net,
num_classes,
biases_initializer=tf.compat.v1.zeros_initializer(),
biases_initializer=tf.zeros_initializer(),
weights_initializer=trunc_normal(1 / 192.0),
weights_regularizer=None,
activation_fn=None,
......@@ -111,12 +109,12 @@ def cifarnet_arg_scope(weight_decay=0.004):
"""
with slim.arg_scope(
[slim.conv2d],
weights_initializer=tf.compat.v1.truncated_normal_initializer(
weights_initializer=tf.truncated_normal_initializer(
stddev=5e-2),
activation_fn=tf.nn.relu):
with slim.arg_scope(
[slim.fully_connected],
biases_initializer=tf.compat.v1.constant_initializer(0.1),
biases_initializer=tf.constant_initializer(0.1),
weights_initializer=trunc_normal(0.04),
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu) as sc:
......
......@@ -19,12 +19,9 @@ from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import util as contrib_util
layers = contrib_layers
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.python.framework import tensor_util
def cyclegan_arg_scope(instance_norm_center=True,
......@@ -55,13 +52,13 @@ def cyclegan_arg_scope(instance_norm_center=True,
weights_regularizer = None
if weight_decay and weight_decay > 0.0:
weights_regularizer = layers.l2_regularizer(weight_decay)
weights_regularizer = slim.l2_regularizer(weight_decay)
with contrib_framework.arg_scope(
[layers.conv2d],
normalizer_fn=layers.instance_norm,
with slim.arg_scope(
[slim.conv2d],
normalizer_fn=slim.instance_norm,
normalizer_params=instance_norm_params,
weights_initializer=tf.compat.v1.random_normal_initializer(
weights_initializer=tf.random_normal_initializer(
0, weights_init_stddev),
weights_regularizer=weights_regularizer) as sc:
return sc
......@@ -91,7 +88,7 @@ def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose',
Raises:
ValueError: if `method` is not recognized.
"""
with tf.compat.v1.variable_scope('upconv'):
with tf.variable_scope('upconv'):
net_shape = tf.shape(input=net)
height = net_shape[1]
width = net_shape[2]
......@@ -106,19 +103,19 @@ def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose',
net, [stride[0] * height, stride[1] * width],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
net = tf.pad(tensor=net, paddings=spatial_pad_1, mode=pad_mode)
net = layers.conv2d(net, num_outputs, kernel_size=[3, 3], padding='valid')
net = slim.conv2d(net, num_outputs, kernel_size=[3, 3], padding='valid')
elif method == 'bilinear_upsample_conv':
net = tf.compat.v1.image.resize_bilinear(
net = tf.image.resize_bilinear(
net, [stride[0] * height, stride[1] * width],
align_corners=align_corners)
net = tf.pad(tensor=net, paddings=spatial_pad_1, mode=pad_mode)
net = layers.conv2d(net, num_outputs, kernel_size=[3, 3], padding='valid')
net = slim.conv2d(net, num_outputs, kernel_size=[3, 3], padding='valid')
elif method == 'conv2d_transpose':
# This corrects 1 pixel offset for images with even width and height.
# conv2d is left aligned and conv2d_transpose is right aligned for even
# sized images (while doing 'SAME' padding).
# Note: This doesn't reflect actual model in paper.
net = layers.conv2d_transpose(
net = slim.conv2d_transpose(
net, num_outputs, kernel_size=[3, 3], stride=stride, padding='valid')
net = net[:, 1:, 1:, :]
else:
......@@ -129,7 +126,7 @@ def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose',
def _dynamic_or_static_shape(tensor):
shape = tf.shape(input=tensor)
static_shape = contrib_util.constant_value(shape)
static_shape = tensor_util.constant_value(shape)
return static_shape if static_shape is not None else shape
......@@ -201,47 +198,46 @@ def cyclegan_generator_resnet(images,
dtype=np.int32)
spatial_pad_3 = np.array([[0, 0], [3, 3], [3, 3], [0, 0]])
with contrib_framework.arg_scope(arg_scope_fn()):
with slim.arg_scope(arg_scope_fn()):
###########
# Encoder #
###########
with tf.compat.v1.variable_scope('input'):
with tf.variable_scope('input'):
# 7x7 input stage
net = tf.pad(tensor=images, paddings=spatial_pad_3, mode='REFLECT')
net = layers.conv2d(net, num_filters, kernel_size=[7, 7], padding='VALID')
net = slim.conv2d(net, num_filters, kernel_size=[7, 7], padding='VALID')
end_points['encoder_0'] = net
with tf.compat.v1.variable_scope('encoder'):
with contrib_framework.arg_scope([layers.conv2d],
kernel_size=kernel_size,
stride=2,
activation_fn=tf.nn.relu,
padding='VALID'):
with tf.variable_scope('encoder'):
with slim.arg_scope([slim.conv2d],
kernel_size=kernel_size,
stride=2,
activation_fn=tf.nn.relu,
padding='VALID'):
net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
net = layers.conv2d(net, num_filters * 2)
net = slim.conv2d(net, num_filters * 2)
end_points['encoder_1'] = net
net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
net = layers.conv2d(net, num_filters * 4)
net = slim.conv2d(net, num_filters * 4)
end_points['encoder_2'] = net
###################
# Residual Blocks #
###################
with tf.compat.v1.variable_scope('residual_blocks'):
with contrib_framework.arg_scope([layers.conv2d],
kernel_size=kernel_size,
stride=1,
activation_fn=tf.nn.relu,
padding='VALID'):
with tf.variable_scope('residual_blocks'):
with slim.arg_scope([slim.conv2d],
kernel_size=kernel_size,
stride=1,
activation_fn=tf.nn.relu,
padding='VALID'):
for block_id in xrange(num_resnet_blocks):
with tf.compat.v1.variable_scope('block_{}'.format(block_id)):
with tf.variable_scope('block_{}'.format(block_id)):
res_net = tf.pad(tensor=net, paddings=paddings, mode='REFLECT')
res_net = layers.conv2d(res_net, num_filters * 4)
res_net = slim.conv2d(res_net, num_filters * 4)
res_net = tf.pad(tensor=res_net, paddings=paddings, mode='REFLECT')
res_net = layers.conv2d(res_net, num_filters * 4,
activation_fn=None)
res_net = slim.conv2d(res_net, num_filters * 4, activation_fn=None)
net += res_net
end_points['resnet_block_%d' % block_id] = net
......@@ -249,24 +245,24 @@ def cyclegan_generator_resnet(images,
###########
# Decoder #
###########
with tf.compat.v1.variable_scope('decoder'):
with tf.variable_scope('decoder'):
with contrib_framework.arg_scope([layers.conv2d],
kernel_size=kernel_size,
stride=1,
activation_fn=tf.nn.relu):
with slim.arg_scope([slim.conv2d],
kernel_size=kernel_size,
stride=1,
activation_fn=tf.nn.relu):
with tf.compat.v1.variable_scope('decoder1'):
with tf.variable_scope('decoder1'):
net = upsample_fn(net, num_outputs=num_filters * 2, stride=[2, 2])
end_points['decoder1'] = net
with tf.compat.v1.variable_scope('decoder2'):
with tf.variable_scope('decoder2'):
net = upsample_fn(net, num_outputs=num_filters, stride=[2, 2])
end_points['decoder2'] = net
with tf.compat.v1.variable_scope('output'):
with tf.variable_scope('output'):
net = tf.pad(tensor=net, paddings=spatial_pad_3, mode='REFLECT')
logits = layers.conv2d(
logits = slim.conv2d(
net,
num_outputs, [7, 7],
activation_fn=None,
......
......@@ -18,7 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.compat.v1 as tf
from nets import cyclegan
......@@ -31,7 +31,7 @@ class CycleganTest(tf.test.TestCase):
img_batch = tf.zeros([2, 32, 32, 3])
model_output, _ = cyclegan.cyclegan_generator_resnet(img_batch)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
sess.run(model_output)
def _test_generator_graph_helper(self, shape):
......@@ -50,13 +50,13 @@ class CycleganTest(tf.test.TestCase):
def test_generator_unknown_batch_dim(self):
"""Check that generator can take unknown batch dimension inputs."""
img = tf.compat.v1.placeholder(tf.float32, shape=[None, 32, None, 3])
img = tf.placeholder(tf.float32, shape=[None, 32, None, 3])
output_imgs, _ = cyclegan.cyclegan_generator_resnet(img)
self.assertAllEqual([None, 32, None, 3], output_imgs.shape.as_list())
def _input_and_output_same_shape_helper(self, kernel_size):
img_batch = tf.compat.v1.placeholder(tf.float32, shape=[None, 32, 32, 3])
img_batch = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
output_img_batch, _ = cyclegan.cyclegan_generator_resnet(
img_batch, kernel_size=kernel_size)
......@@ -79,7 +79,7 @@ class CycleganTest(tf.test.TestCase):
self.assertRaisesRegexp(
ValueError, 'The input height must be a multiple of 4.',
cyclegan.cyclegan_generator_resnet,
tf.compat.v1.placeholder(tf.float32, shape=[None, height, 32, 3]))
tf.placeholder(tf.float32, shape=[None, height, 32, 3]))
def test_error_if_height_not_multiple_of_four_height29(self):
self._error_if_height_not_multiple_of_four_helper(29)
......@@ -94,7 +94,7 @@ class CycleganTest(tf.test.TestCase):
self.assertRaisesRegexp(
ValueError, 'The input width must be a multiple of 4.',
cyclegan.cyclegan_generator_resnet,
tf.compat.v1.placeholder(tf.float32, shape=[None, 32, width, 3]))
tf.placeholder(tf.float32, shape=[None, 32, width, 3]))
def test_error_if_width_not_multiple_of_four_width29(self):
self._error_if_width_not_multiple_of_four_helper(29)
......
......@@ -20,10 +20,8 @@ from __future__ import print_function
from math import log
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
def _validate_image_inputs(inputs):
......@@ -82,7 +80,7 @@ def discriminator(inputs,
inp_shape = inputs.get_shape().as_list()[1]
end_points = {}
with tf.compat.v1.variable_scope(
with tf.variable_scope(
scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d],
......@@ -157,7 +155,7 @@ def generator(inputs,
end_points = {}
num_layers = int(log(final_size, 2)) - 1
with tf.compat.v1.variable_scope(
with tf.variable_scope(
scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d_transpose],
......
......@@ -19,7 +19,7 @@ from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow.compat.v1 as tf
from nets import dcgan
......@@ -27,19 +27,19 @@ from nets import dcgan
class DCGANTest(tf.test.TestCase):
def test_generator_run(self):
tf.compat.v1.set_random_seed(1234)
tf.set_random_seed(1234)
noise = tf.random.normal([100, 64])
image, _ = dcgan.generator(noise)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
image.eval()
def test_generator_graph(self):
tf.compat.v1.set_random_seed(1234)
tf.set_random_seed(1234)
# Check graph construction for a number of image size/depths and batch
# sizes.
for i, batch_size in zip(xrange(3, 7), xrange(3, 8)):
tf.compat.v1.reset_default_graph()
tf.reset_default_graph()
final_size = 2 ** i
noise = tf.random.normal([batch_size, 64])
image, end_points = dcgan.generator(
......@@ -74,14 +74,14 @@ class DCGANTest(tf.test.TestCase):
image = tf.random.uniform([5, 32, 32, 3], -1, 1)
output, _ = dcgan.discriminator(image)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output.eval()
def test_discriminator_graph(self):
# Check graph construction for a number of image size/depths and batch
# sizes.
for i, batch_size in zip(xrange(1, 6), xrange(3, 8)):
tf.compat.v1.reset_default_graph()
tf.reset_default_graph()
img_w = 2 ** i
image = tf.random.uniform([batch_size, img_w, img_w, 3], -1, 1)
output, end_points = dcgan.discriminator(
......@@ -103,7 +103,7 @@ class DCGANTest(tf.test.TestCase):
with self.assertRaises(ValueError):
dcgan.discriminator(wrong_dim_img)
spatially_undefined_shape = tf.compat.v1.placeholder(
spatially_undefined_shape = tf.placeholder(
tf.float32, [5, 32, None, 3])
with self.assertRaises(ValueError):
dcgan.discriminator(spatially_undefined_shape)
......
......@@ -24,16 +24,14 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import i3d_utils
from nets import s3dg
slim = contrib_slim
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
conv3d_spatiotemporal = i3d_utils.conv3d_spatiotemporal
......@@ -152,12 +150,12 @@ def i3d(inputs,
activation.
"""
# Final pooling and prediction
with tf.compat.v1.variable_scope(
with tf.variable_scope(
scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
with slim.arg_scope(
[slim.batch_norm, slim.dropout], is_training=is_training):
net, end_points = i3d_base(inputs, scope=scope)
with tf.compat.v1.variable_scope('Logits'):
with tf.variable_scope('Logits'):
kernel_size = i3d_utils.reduced_kernel_size_3d(net, [2, 7, 7])
net = slim.avg_pool3d(
net, kernel_size, stride=1, scope='AvgPool_0a_7x7')
......
......@@ -18,7 +18,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import six
import tensorflow.compat.v1 as tf
from nets import i3d
......@@ -55,7 +56,7 @@ class I3DTest(tf.test.TestCase):
'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c',
'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
self.assertItemsEqual(list(end_points.keys()), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
......@@ -100,8 +101,9 @@ class I3DTest(tf.test.TestCase):
'Mixed_5b': [5, 8, 7, 7, 832],
'Mixed_5c': [5, 8, 7, 7, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.iteritems():
self.assertItemsEqual(
list(endpoints_shapes.keys()), list(end_points.keys()))
for endpoint_name, expected_shape in six.iteritems(endpoints_shapes):
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
......@@ -140,7 +142,7 @@ class I3DTest(tf.test.TestCase):
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
......
......@@ -19,15 +19,11 @@ from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
import tensorflow.compat.v1 as tf
import tf_slim as slim
# Originally, add_arg_scope = slim.add_arg_scope and layers = slim; now switch
# to the more up-to-date tf.contrib.* API.
add_arg_scope = contrib_framework.add_arg_scope
layers = contrib_layers
add_arg_scope = slim.add_arg_scope
layers = slim.layers
def center_initializer():
......@@ -228,13 +224,13 @@ def inception_block_v1_3d(inputs,
"""
use_gating = self_gating_fn is not None
with tf.compat.v1.variable_scope(scope):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(scope):
with tf.variable_scope('Branch_0'):
branch_0 = layers.conv3d(
inputs, num_outputs_0_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
if use_gating:
branch_0 = self_gating_fn(branch_0, scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
branch_1 = layers.conv3d(
inputs, num_outputs_1_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
branch_1 = conv3d_spatiotemporal(
......@@ -242,7 +238,7 @@ def inception_block_v1_3d(inputs,
scope='Conv2d_0b_3x3')
if use_gating:
branch_1 = self_gating_fn(branch_1, scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
branch_2 = layers.conv3d(
inputs, num_outputs_2_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
branch_2 = conv3d_spatiotemporal(
......@@ -250,7 +246,7 @@ def inception_block_v1_3d(inputs,
scope='Conv2d_0b_3x3')
if use_gating:
branch_2 = self_gating_fn(branch_2, scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
branch_3 = layers.max_pool3d(inputs, [3, 3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv3d(
branch_3, num_outputs_3_0b, [1, 1, 1], scope='Conv2d_0b_1x1')
......
......@@ -25,21 +25,19 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 35x35 resnet block."""
with tf.compat.v1.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
......@@ -59,10 +57,10 @@ def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 17x17 resnet block."""
with tf.compat.v1.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
scope='Conv2d_0b_1x7')
......@@ -85,10 +83,10 @@ def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 8x8 resnet block."""
with tf.compat.v1.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
scope='Conv2d_0b_1x3')
......@@ -155,7 +153,7 @@ def inception_resnet_v2_base(inputs,
end_points[name] = net
return name == final_endpoint
with tf.compat.v1.variable_scope(scope, 'InceptionResnetV2', [inputs]):
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
......@@ -188,20 +186,20 @@ def inception_resnet_v2_base(inputs,
if add_and_check_final('MaxPool_5a_3x3', net): return net, end_points
# 35 x 35 x 320
with tf.compat.v1.variable_scope('Mixed_5b'):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
scope='Conv2d_0b_5x5')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
scope='Conv2d_0c_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
scope='AvgPool_0a_3x3')
tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
......@@ -218,12 +216,12 @@ def inception_resnet_v2_base(inputs,
# 33 x 33 x 1088 if output_stride == 16
use_atrous = output_stride == 8
with tf.compat.v1.variable_scope('Mixed_6a'):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope('Mixed_6a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 384, 3, stride=1 if use_atrous else 2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
scope='Conv2d_0b_3x3')
......@@ -231,7 +229,7 @@ def inception_resnet_v2_base(inputs,
stride=1 if use_atrous else 2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
tower_pool = slim.max_pool2d(net, 3, stride=1 if use_atrous else 2,
padding=padding,
scope='MaxPool_1a_3x3')
......@@ -251,25 +249,25 @@ def inception_resnet_v2_base(inputs,
'PreAuxlogits end_point for now.')
# 8 x 8 x 2080
with tf.compat.v1.variable_scope('Mixed_7a'):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(net, 3, stride=2,
padding=padding,
scope='MaxPool_1a_3x3')
......@@ -320,7 +318,7 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
"""
end_points = {}
with tf.compat.v1.variable_scope(
with tf.variable_scope(
scope, 'InceptionResnetV2', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
......@@ -329,7 +327,7 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
activation_fn=activation_fn)
if create_aux_logits and num_classes:
with tf.compat.v1.variable_scope('AuxLogits'):
with tf.variable_scope('AuxLogits'):
aux = end_points['PreAuxLogits']
aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID',
scope='Conv2d_1a_3x3')
......@@ -341,7 +339,7 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
scope='Logits')
end_points['AuxLogits'] = aux
with tf.compat.v1.variable_scope('Logits'):
with tf.variable_scope('Logits'):
# TODO(sguada,arnoegw): Consider adding a parameter global_pool which
# can be set to False to disable pooling here (as in resnet_*()).
kernel_size = net.get_shape()[1:3]
......@@ -372,7 +370,7 @@ def inception_resnet_v2_arg_scope(
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
activation_fn=tf.nn.relu,
batch_norm_updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
batch_norm_scale=False):
"""Returns the scope with the default parameters for inception_resnet_v2.
......
......@@ -17,8 +17,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception
......@@ -117,7 +117,7 @@ class InceptionTest(tf.test.TestCase):
if endpoint != 'PreAuxLogits':
self.assertTrue(out_tensor.op.name.startswith(
'InceptionResnetV2/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points.keys())
self.assertItemsEqual(endpoints[:index + 1], end_points.keys())
def testBuildAndCheckAllEndPointsUptoPreAuxLogits(self):
batch_size = 5
......@@ -204,15 +204,15 @@ class InceptionTest(tf.test.TestCase):
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
# Force all Variables to reside on the device.
with tf.compat.v1.variable_scope('on_cpu'), tf.device('/cpu:0'):
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
inception.inception_resnet_v2(inputs, num_classes)
with tf.compat.v1.variable_scope('on_gpu'), tf.device('/gpu:0'):
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
inception.inception_resnet_v2(inputs, num_classes)
for v in tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testHalfSizeImages(self):
......@@ -248,7 +248,7 @@ class InceptionTest(tf.test.TestCase):
height, width = 330, 400
num_classes = 1000
with self.test_session() as sess:
inputs = tf.compat.v1.placeholder(tf.float32, (batch_size, None, None, 3))
inputs = tf.placeholder(tf.float32, (batch_size, None, None, 3))
logits, end_points = inception.inception_resnet_v2(
inputs, num_classes, create_aux_logits=False)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
......@@ -256,7 +256,7 @@ class InceptionTest(tf.test.TestCase):
[batch_size, num_classes])
pre_pool = end_points['Conv2d_7b_1x1']
images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
logits_out, pre_pool_out = sess.run([logits, pre_pool],
{inputs: images.eval()})
self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))
......@@ -267,13 +267,13 @@ class InceptionTest(tf.test.TestCase):
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
......@@ -287,7 +287,7 @@ class InceptionTest(tf.test.TestCase):
num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
......@@ -305,32 +305,32 @@ class InceptionTest(tf.test.TestCase):
is_training=False,
reuse=True)
predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testNoBatchNormScaleByDefault(self):
height, width = 299, 299
num_classes = 1000
inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with contrib_slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
inception.inception_resnet_v2(inputs, num_classes, is_training=False)
self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 299, 299
num_classes = 1000
inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
with contrib_slim.arg_scope(
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_resnet_v2_arg_scope(batch_norm_scale=True)):
inception.inception_resnet_v2(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name
for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))
for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
......
......@@ -24,10 +24,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
def inception_arg_scope(
......@@ -36,7 +34,7 @@ def inception_arg_scope(
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
activation_fn=tf.nn.relu,
batch_norm_updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
batch_norm_scale=False):
"""Defines the default arg scope for inception models.
......
......@@ -18,15 +18,13 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception_utils
slim = contrib_slim
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
......@@ -62,7 +60,7 @@ def inception_v1_base(inputs,
ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
with tf.compat.v1.variable_scope(scope, 'InceptionV1', [inputs]):
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=trunc_normal(0.01)):
......@@ -97,16 +95,16 @@ def inception_v1_base(inputs,
return net, end_points
end_point = 'Mixed_3b'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -115,16 +113,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3c'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -138,16 +136,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4b'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -156,16 +154,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4c'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -174,16 +172,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4d'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -192,16 +190,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4e'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -210,16 +208,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4f'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -233,16 +231,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5b'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -251,16 +249,16 @@ def inception_v1_base(inputs,
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5c'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
......@@ -316,12 +314,12 @@ def inception_v1(inputs,
activation.
"""
# Final pooling and prediction
with tf.compat.v1.variable_scope(
with tf.variable_scope(
scope, 'InceptionV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v1_base(inputs, scope=scope)
with tf.compat.v1.variable_scope('Logits'):
with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
......
......@@ -19,13 +19,11 @@ from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception
slim = contrib_slim
class InceptionV1Test(tf.test.TestCase):
......@@ -172,13 +170,13 @@ class InceptionV1Test(tf.test.TestCase):
expected_shape)
def testUnknownImageShape(self):
tf.compat.v1.reset_default_graph()
tf.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.compat.v1.placeholder(
inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
......@@ -186,18 +184,18 @@ class InceptionV1Test(tf.test.TestCase):
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.compat.v1.global_variables_initializer().run()
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
tf.compat.v1.reset_default_graph()
tf.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.compat.v1.placeholder(
inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes,
global_pool=True)
......@@ -206,7 +204,7 @@ class InceptionV1Test(tf.test.TestCase):
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.compat.v1.global_variables_initializer().run()
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
......@@ -215,7 +213,7 @@ class InceptionV1Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
......@@ -223,7 +221,7 @@ class InceptionV1Test(tf.test.TestCase):
images = tf.random.uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
......@@ -238,7 +236,7 @@ class InceptionV1Test(tf.test.TestCase):
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
......@@ -255,7 +253,7 @@ class InceptionV1Test(tf.test.TestCase):
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
......@@ -267,32 +265,32 @@ class InceptionV1Test(tf.test.TestCase):
spatial_squeeze=False)
with self.test_session() as sess:
tf.compat.v1.global_variables_initializer().run()
tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testNoBatchNormScaleByDefault(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1(inputs, num_classes, is_training=False)
self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_v1_arg_scope(batch_norm_scale=True)):
inception.inception_v1(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name
for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))
for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
......
......@@ -18,15 +18,13 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception_utils
slim = contrib_slim
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
......@@ -95,7 +93,7 @@ def inception_v2_base(inputs,
)
concat_dim = 3 if data_format == 'NHWC' else 1
with tf.compat.v1.variable_scope(scope, 'InceptionV2', [inputs]):
with tf.variable_scope(scope, 'InceptionV2', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
......@@ -170,17 +168,17 @@ def inception_v2_base(inputs,
# 28 x 28 x 192
# Inception module.
end_point = 'Mixed_3b'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [3, 3],
scope='Conv2d_0b_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
......@@ -189,7 +187,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(32), [1, 1],
@@ -201,17 +199,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 28 x 28 x 256
end_point = 'Mixed_3c'
-with tf.compat.v1.variable_scope(end_point):
-with tf.compat.v1.variable_scope('Branch_0'):
+with tf.variable_scope(end_point):
+with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
-with tf.compat.v1.variable_scope('Branch_1'):
+with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
-with tf.compat.v1.variable_scope('Branch_2'):
+with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
@@ -220,7 +218,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
-with tf.compat.v1.variable_scope('Branch_3'):
+with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(64), [1, 1],
@@ -232,15 +230,15 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 28 x 28 x 320
end_point = 'Mixed_4a'
-with tf.compat.v1.variable_scope(end_point):
-with tf.compat.v1.variable_scope('Branch_0'):
+with tf.variable_scope(end_point):
+with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
-with tf.compat.v1.variable_scope('Branch_1'):
+with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
@@ -249,7 +247,7 @@ def inception_v2_base(inputs,
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
-with tf.compat.v1.variable_scope('Branch_2'):
+with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2])
@@ -257,17 +255,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4b'
-with tf.compat.v1.variable_scope(end_point):
-with tf.compat.v1.variable_scope('Branch_0'):
+with tf.variable_scope(end_point):
+with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
-with tf.compat.v1.variable_scope('Branch_1'):
+with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
-with tf.compat.v1.variable_scope('Branch_2'):
+with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
@@ -276,7 +274,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0c_3x3')
-with tf.compat.v1.variable_scope('Branch_3'):
+with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
@@ -288,17 +286,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4c'
-with tf.compat.v1.variable_scope(end_point):
-with tf.compat.v1.variable_scope('Branch_0'):
+with tf.variable_scope(end_point):
+with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
-with tf.compat.v1.variable_scope('Branch_1'):
+with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(128), [3, 3],
scope='Conv2d_0b_3x3')
-with tf.compat.v1.variable_scope('Branch_2'):
+with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
@@ -307,7 +305,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0c_3x3')
-with tf.compat.v1.variable_scope('Branch_3'):
+with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
@@ -319,17 +317,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4d'
-with tf.compat.v1.variable_scope(end_point):
-with tf.compat.v1.variable_scope('Branch_0'):
+with tf.variable_scope(end_point):
+with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
-with tf.compat.v1.variable_scope('Branch_1'):
+with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [3, 3],
scope='Conv2d_0b_3x3')
-with tf.compat.v1.variable_scope('Branch_2'):
+with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
@@ -338,7 +336,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
scope='Conv2d_0c_3x3')
-with tf.compat.v1.variable_scope('Branch_3'):
+with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(96), [1, 1],
@@ -350,17 +348,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4e'
-with tf.compat.v1.variable_scope(end_point):
-with tf.compat.v1.variable_scope('Branch_0'):
+with tf.variable_scope(end_point):
+with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
-with tf.compat.v1.variable_scope('Branch_1'):
+with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [3, 3],
scope='Conv2d_0b_3x3')
-with tf.compat.v1.variable_scope('Branch_2'):
+with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
@@ -369,7 +367,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
scope='Conv2d_0c_3x3')
-with tf.compat.v1.variable_scope('Branch_3'):
+with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(96), [1, 1],
@@ -381,15 +379,15 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_5a'
-with tf.compat.v1.variable_scope(end_point):
-with tf.compat.v1.variable_scope('Branch_0'):
+with tf.variable_scope(end_point):
+with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
-with tf.compat.v1.variable_scope('Branch_1'):
+with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
@@ -398,7 +396,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
-with tf.compat.v1.variable_scope('Branch_2'):
+with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
scope='MaxPool_1a_3x3')
net = tf.concat(
@@ -407,17 +405,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5b'
-with tf.compat.v1.variable_scope(end_point):
-with tf.compat.v1.variable_scope('Branch_0'):
+with tf.variable_scope(end_point):
+with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
-with tf.compat.v1.variable_scope('Branch_1'):
+with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
-with tf.compat.v1.variable_scope('Branch_2'):
+with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
@@ -426,7 +424,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
-with tf.compat.v1.variable_scope('Branch_3'):
+with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
@@ -438,17 +436,17 @@ def inception_v2_base(inputs,
if end_point == final_endpoint: return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5c'
-with tf.compat.v1.variable_scope(end_point):
-with tf.compat.v1.variable_scope('Branch_0'):
+with tf.variable_scope(end_point):
+with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
-with tf.compat.v1.variable_scope('Branch_1'):
+with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
-with tf.compat.v1.variable_scope('Branch_2'):
+with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
@@ -457,7 +455,7 @@ def inception_v2_base(inputs,
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
-with tf.compat.v1.variable_scope('Branch_3'):
+with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
@@ -528,14 +526,14 @@ def inception_v2(inputs,
raise ValueError('depth_multiplier is not greater than zero.')
# Final pooling and prediction
-with tf.compat.v1.variable_scope(
+with tf.variable_scope(
scope, 'InceptionV2', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v2_base(
inputs, scope=scope, min_depth=min_depth,
depth_multiplier=depth_multiplier)
-with tf.compat.v1.variable_scope('Logits'):
+with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
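Reference sketch (illustrative depths and scope names, not a block copied from inception_v2.py): every Mixed_* hunk above follows the same branch-and-concat template — parallel slim branches built under nested variable_scopes, then joined along the depth axis.

import tensorflow.compat.v1 as tf
import tf_slim as slim

def mixed_block(net, concat_dim=3):
  """One Inception-style module: parallel branches joined on the depth axis."""
  # stride=1 / SAME padding keep every branch at the input's spatial size,
  # matching the arg_scope the real inception_v2_base sets up.
  with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope('Mixed_example'):
      with tf.variable_scope('Branch_0'):
        branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
      with tf.variable_scope('Branch_1'):
        branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
        branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
      with tf.variable_scope('Branch_2'):
        branch_2 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
        branch_2 = slim.conv2d(branch_2, 32, [1, 1], scope='Conv2d_0b_1x1')
      # concat_dim is 3 for NHWC and 1 for NCHW, as in inception_v2_base.
      return tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2])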
@@ -19,13 +19,11 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-import tensorflow as tf
-from tensorflow.contrib import slim as contrib_slim
+import tensorflow.compat.v1 as tf
+import tf_slim as slim
from nets import inception
-slim = contrib_slim
class InceptionV2Test(tf.test.TestCase):
@@ -284,13 +282,13 @@ class InceptionV2Test(tf.test.TestCase):
expected_shape)
def testUnknownImageShape(self):
-tf.compat.v1.reset_default_graph()
+tf.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
-inputs = tf.compat.v1.placeholder(
+inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
@@ -298,18 +296,18 @@ class InceptionV2Test(tf.test.TestCase):
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
-tf.compat.v1.global_variables_initializer().run()
+tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
-tf.compat.v1.reset_default_graph()
+tf.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
-inputs = tf.compat.v1.placeholder(
+inputs = tf.placeholder(
tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v2(inputs, num_classes,
global_pool=True)
@@ -318,7 +316,7 @@ class InceptionV2Test(tf.test.TestCase):
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
-tf.compat.v1.global_variables_initializer().run()
+tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
@@ -327,7 +325,7 @@ class InceptionV2Test(tf.test.TestCase):
height, width = 224, 224
num_classes = 1000
-inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))
+inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
@@ -335,7 +333,7 @@ class InceptionV2Test(tf.test.TestCase):
images = tf.random.uniform((batch_size, height, width, 3))
with self.test_session() as sess:
-sess.run(tf.compat.v1.global_variables_initializer())
+sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
@@ -350,7 +348,7 @@ class InceptionV2Test(tf.test.TestCase):
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
-sess.run(tf.compat.v1.global_variables_initializer())
+sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
@@ -367,7 +365,7 @@ class InceptionV2Test(tf.test.TestCase):
predictions = tf.argmax(input=logits, axis=1)
with self.test_session() as sess:
-sess.run(tf.compat.v1.global_variables_initializer())
+sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
@@ -379,32 +377,32 @@ class InceptionV2Test(tf.test.TestCase):
spatial_squeeze=False)
with self.test_session() as sess:
-tf.compat.v1.global_variables_initializer().run()
+tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testNoBatchNormScaleByDefault(self):
height, width = 224, 224
num_classes = 1000
-inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
+inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v2_arg_scope()):
inception.inception_v2(inputs, num_classes, is_training=False)
-self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])
+self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 224, 224
num_classes = 1000
-inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))
+inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_v2_arg_scope(batch_norm_scale=True)):
inception.inception_v2(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name
-for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))
+for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
-for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):
+for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
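Reference sketch (hypothetical scope names; the real tests build the full InceptionV1/V2 graphs): the two BatchNorm tests above boil down to checking whether batch_norm's scale parameter creates gamma variables that the regex can match.

import tensorflow.compat.v1 as tf
import tf_slim as slim

tf.disable_v2_behavior()  # assumed, as in the first sketch
tf.reset_default_graph()
x = tf.placeholder(tf.float32, (1, 8, 8, 16))

# scale=False (the default) creates beta and the moving statistics, but no gamma.
slim.batch_norm(x, scale=False, scope='bn_noscale')
assert tf.global_variables('bn_noscale/gamma:0$') == []

# scale=True adds one gamma per normalized layer, which the tests find by regex.
slim.batch_norm(x, scale=True, scope='bn_scale')
assert len(tf.global_variables('bn_scale/gamma:0$')) == 1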