Commit ba87e2c6 authored by Mark Sandler, committed by Sergio Guadarrama

Merged commit includes the following changes: (#7797)

279978375  by Sergio Guadarrama:

    Pass s=2 to the expanded_conv block so that it can apply the residual connection correctly in the case of fused convolutions. (Before, it relied on the channel mismatch only.)
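    A residual connection is only valid when a block preserves its input shape. In a fused block the expansion convolution itself carries the stride (there is no depthwise layer to carry it), so expanded_conv must be told the stride explicitly instead of inferring compatibility from channel counts alone. A minimal sketch of the shape check (illustrative only; maybe_add_residual is a hypothetical helper, not code from this commit):

        def maybe_add_residual(input_tensor, output_tensor, stride):
          # The skip connection is safe only if spatial dims and depth match.
          if stride == 1 and input_tensor.shape[-1] == output_tensor.shape[-1]:
            return output_tensor + input_tensor
          return output_tensor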

--
279788358  by Sergio Guadarrama:

    Update README to add mobilenet-edgetpu details

--
279774392  by Sergio Guadarrama:

    Adds MobilenetV3-EdgeTpu definition.

--
278917344  by Sergio Guadarrama:

    Create visualwakewords dataset using slim scripts instead of custom scripts.

--
277940048  by Sergio Guadarrama:

    Internal changes to tf.contrib symbols

--

PiperOrigin-RevId: 279978375
parent c2902cfb
@@ -28,11 +28,13 @@ import copy
import functools
import tensorflow as tf
+from tensorflow.contrib import layers as contrib_layers
+from tensorflow.contrib import slim as contrib_slim
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet as lib
-slim = tf.contrib.slim
+slim = contrib_slim
op = lib.op
expand_input = ops.expand_input_by_factor
@@ -84,18 +86,18 @@ V2_DEF = dict(
# Mobilenet v2 Definition with group normalization.
V2_DEF_GROUP_NORM = copy.deepcopy(V2_DEF)
V2_DEF_GROUP_NORM['defaults'] = {
-    (tf.contrib.slim.conv2d, tf.contrib.slim.fully_connected,
-     tf.contrib.slim.separable_conv2d): {
-        'normalizer_fn': tf.contrib.layers.group_norm,  # pylint: disable=C0330
+    (contrib_slim.conv2d, contrib_slim.fully_connected,
+     contrib_slim.separable_conv2d): {
+        'normalizer_fn': contrib_layers.group_norm,  # pylint: disable=C0330
        'activation_fn': tf.nn.relu6,  # pylint: disable=C0330
    },  # pylint: disable=C0330
    (ops.expanded_conv,): {
        'expansion_size': ops.expand_input_by_factor(6),
        'split_expansion': 1,
-        'normalizer_fn': tf.contrib.layers.group_norm,
+        'normalizer_fn': contrib_layers.group_norm,
        'residual': True
    },
-    (tf.contrib.slim.conv2d, tf.contrib.slim.separable_conv2d): {
+    (contrib_slim.conv2d, contrib_slim.separable_conv2d): {
        'padding': 'SAME'
    }
}
@@ -213,7 +215,7 @@ def mobilenet_base_group_norm(input_tensor, depth_multiplier=1.0, **kwargs):
  """Creates the base of the mobilenet (no pooling and no logits)."""
  kwargs['conv_defs'] = V2_DEF_GROUP_NORM
  kwargs['conv_defs']['defaults'].update({
-      (tf.contrib.layers.group_norm,): {
+      (contrib_layers.group_norm,): {
          'groups': kwargs.pop('groups', 8)
      }
  })
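A hedged usage sketch of the group-norm variant defined above (the placeholder input is illustrative, not from this diff): the groups keyword is popped by the wrapper and becomes the group_norm default, so callers can override the hard-coded 8.

    images = tf.placeholder(tf.float32, (1, 224, 224, 3))
    net, end_points = mobilenet_base_group_norm(images, groups=16)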
......
@@ -19,12 +19,13 @@ from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
+from tensorflow.contrib import slim as contrib_slim
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
-slim = tf.contrib.slim
+slim = contrib_slim
def find_ops(optype):
......
@@ -23,11 +23,12 @@ import functools
import numpy as np
import tensorflow as tf
+from tensorflow.contrib import slim as contrib_slim
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet as lib
-slim = tf.contrib.slim
+slim = contrib_slim
op = lib.op
expand_input = ops.expand_input_by_factor
@@ -57,7 +58,7 @@ def reduce_to_1x1(input_tensor, default_size=7, **kwargs):
  return slim.avg_pool2d(input_tensor, kernel_size=k, **kwargs)
-def mbv3_op(ef, n, k, s=1, act=tf.nn.relu, se=None):
+def mbv3_op(ef, n, k, s=1, act=tf.nn.relu, se=None, **kwargs):
  """Defines a single Mobilenet V3 convolution block.
  Args:
@@ -67,14 +68,44 @@ def mbv3_op(ef, n, k, s=1, act=tf.nn.relu, se=None):
    s: stride
    act: activation function in inner layers
    se: squeeze excite function.
+    **kwargs: passed to expanded_conv
  Returns:
    An object (lib._Op) for inserting in conv_def, representing this operation.
  """
-  return op(ops.expanded_conv, expansion_size=expand_input(ef),
-            kernel_size=(k, k), stride=s, num_outputs=n,
-            inner_activation_fn=act,
-            expansion_transform=se)
+  return op(
+      ops.expanded_conv,
+      expansion_size=expand_input(ef),
+      kernel_size=(k, k),
+      stride=s,
+      num_outputs=n,
+      inner_activation_fn=act,
+      expansion_transform=se,
+      **kwargs)
+def mbv3_fused(ef, n, k, s=1, **kwargs):
+  """Defines a single Mobilenet V3 fused convolution block.
+  Args:
+    ef: expansion factor
+    n: number of output channels
+    k: kernel size of the fused expansion convolution
+    s: stride
+    **kwargs: will be passed to mbv3_op
+  Returns:
+    An object (lib._Op) for inserting in conv_def, representing this operation.
+  """
+  expansion_fn = functools.partial(slim.conv2d, kernel_size=k, stride=s)
+  return mbv3_op(
+      ef,
+      n,
+      k=1,
+      s=s,
+      depthwise_location=None,
+      expansion_fn=expansion_fn,
+      **kwargs)
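A fused block replaces the usual 1x1 expand -> kxk depthwise -> 1x1 project sequence with a single kxk (possibly strided) expansion convolution followed by the 1x1 projection; the **kwargs pass-through added to mbv3_op above is what lets mbv3_fused override depthwise_location and expansion_fn, and lets the spec below set residual=False on an individual block. A rough functional sketch of what a fused block computes (illustrative only, not the expanded_conv implementation; fused_block_sketch is a hypothetical name):

    def fused_block_sketch(x, ef, n, k, s):
      in_ch = int(x.shape[-1])
      # Fused expansion: one k x k strided conv does both the channel
      # expansion and the spatial filtering.
      h = slim.conv2d(x, in_ch * ef, kernel_size=k, stride=s)
      # Linear 1x1 projection down to n output channels, as in MobilenetV2/V3.
      return slim.conv2d(h, n, kernel_size=1, activation_fn=None)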
mbv3_op_se = functools.partial(mbv3_op, se=_se4)
@@ -206,6 +237,38 @@ V3_SMALL_MINIMALISTIC = dict(
    ]))
+# EdgeTPU friendly variant of MobilenetV3 that uses fused convolutions
+# instead of depthwise in the early layers.
+V3_EDGETPU = dict(
+    defaults=dict(DEFAULTS),
+    spec=[
+        op(slim.conv2d, stride=2, num_outputs=32, kernel_size=(3, 3)),
+        mbv3_fused(k=3, s=1, ef=1, n=16),
+        mbv3_fused(k=3, s=2, ef=8, n=32),
+        mbv3_fused(k=3, s=1, ef=4, n=32),
+        mbv3_fused(k=3, s=1, ef=4, n=32),
+        mbv3_fused(k=3, s=1, ef=4, n=32),
+        mbv3_fused(k=3, s=2, ef=8, n=48),
+        mbv3_fused(k=3, s=1, ef=4, n=48),
+        mbv3_fused(k=3, s=1, ef=4, n=48),
+        mbv3_fused(k=3, s=1, ef=4, n=48),
+        mbv3_op(k=3, s=2, ef=8, n=96),
+        mbv3_op(k=3, s=1, ef=4, n=96),
+        mbv3_op(k=3, s=1, ef=4, n=96),
+        mbv3_op(k=3, s=1, ef=4, n=96),
+        mbv3_op(k=3, s=1, ef=8, n=96, residual=False),
+        mbv3_op(k=3, s=1, ef=4, n=96),
+        mbv3_op(k=3, s=1, ef=4, n=96),
+        mbv3_op(k=3, s=1, ef=4, n=96),
+        mbv3_op(k=5, s=2, ef=8, n=160),
+        mbv3_op(k=5, s=1, ef=4, n=160),
+        mbv3_op(k=5, s=1, ef=4, n=160),
+        mbv3_op(k=5, s=1, ef=4, n=160),
+        mbv3_op(k=3, s=1, ef=8, n=192),
+        op(slim.conv2d, stride=1, num_outputs=1280, kernel_size=(1, 1)),
+    ])
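Note the output resolution arithmetic here: for a 224x224 input, the spec has five stride-2 stages (the stem conv plus four s=2 blocks), so the final feature map is 224 / 2^5 = 7 pixels on a side, matching the [1, 7, 7, 1280] shape of layer_24 asserted in the test below.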
@slim.add_arg_scope
def mobilenet(input_tensor,
num_classes=1001,
@@ -275,15 +338,26 @@ def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
      input_tensor, depth_multiplier=depth_multiplier, base_only=True, **kwargs)
-def wrapped_partial(func, *args, **kwargs):
-  partial_func = functools.partial(func, *args, **kwargs)
+def wrapped_partial(func, new_defaults=None,
+                    **kwargs):
+  """Partial function with new default parameters and updated docstring."""
+  if not new_defaults:
+    new_defaults = {}
+  def func_wrapper(*f_args, **f_kwargs):
+    new_kwargs = dict(new_defaults)
+    new_kwargs.update(f_kwargs)
+    return func(*f_args, **new_kwargs)
+  functools.update_wrapper(func_wrapper, func)
+  partial_func = functools.partial(func_wrapper, **kwargs)
  functools.update_wrapper(partial_func, func)
  return partial_func
large = wrapped_partial(mobilenet, conv_defs=V3_LARGE)
small = wrapped_partial(mobilenet, conv_defs=V3_SMALL)
+edge_tpu = wrapped_partial(mobilenet,
+                           new_defaults={'scope': 'MobilenetEdgeTPU'},
+                           conv_defs=V3_EDGETPU)
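A hedged call sketch for the edge_tpu wrapper just defined (the placeholder input is illustrative): keywords in new_defaults are merged in at call time with caller keywords taking precedence, and functools.update_wrapper carries mobilenet's docstring over to the wrapper, which the tests below assert.

    images = tf.placeholder(tf.float32, (1, 224, 224, 3))
    net, _ = edge_tpu(images)                   # endpoints under 'MobilenetEdgeTPU'
    net, _ = edge_tpu(images, scope='MyScope')  # the default scope can be overridden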
# Minimalistic model that does not have Squeeze Excite blocks,
# Hardswish, or 5x5 depthwise convolution.
......
@@ -42,6 +42,21 @@ class MobilenetV3Test(absltest.TestCase):
        conv_defs=mobilenet_v3.V3_SMALL)
    self.assertEqual(endpoints['layer_15'].shape, [1, 1, 1, 1024])
+  def testMobilenetEdgeTpu(self):
+    _, endpoints = mobilenet_v3.edge_tpu(
+        tf.placeholder(tf.float32, (1, 224, 224, 3)))
+    self.assertIn('Inference mode is created by default',
+                  mobilenet_v3.edge_tpu.__doc__)
+    self.assertEqual(endpoints['layer_24'].shape, [1, 7, 7, 1280])
+    self.assertStartsWith(
+        endpoints['layer_24'].name, 'MobilenetEdgeTPU')
+
+  def testMobilenetEdgeTpuChangeScope(self):
+    _, endpoints = mobilenet_v3.edge_tpu(
+        tf.placeholder(tf.float32, (1, 224, 224, 3)), scope='Scope')
+    self.assertStartsWith(
+        endpoints['layer_24'].name, 'Scope')
  def testMobilenetV3BaseOnly(self):
    result, endpoints = mobilenet_v3.mobilenet(
        tf.placeholder(tf.float32, (1, 224, 224, 3)),
......
@@ -109,8 +109,10 @@ from collections import namedtuple
import functools
import tensorflow as tf
+from tensorflow.contrib import layers as contrib_layers
+from tensorflow.contrib import slim as contrib_slim
-slim = tf.contrib.slim
+slim = contrib_slim
# Conv and DepthSepConv namedtuple define layers of the MobileNet architecture
# Conv defines 3x3 convolution layers
@@ -307,7 +309,7 @@ def mobilenet_v1(inputs,
                 min_depth=8,
                 depth_multiplier=1.0,
                 conv_defs=None,
-                 prediction_fn=tf.contrib.layers.softmax,
+                 prediction_fn=contrib_layers.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='MobilenetV1',
@@ -461,7 +463,7 @@ def mobilenet_v1_arg_scope(
  # Set weight_decay for weights in Conv and DepthSepConv layers.
  weights_init = tf.truncated_normal_initializer(stddev=stddev)
-  regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
+  regularizer = contrib_layers.l2_regularizer(weight_decay)
  if regularize_depthwise:
    depthwise_regularizer = regularizer
  else:
......
@@ -20,12 +20,14 @@ from __future__ import print_function
import math
import tensorflow as tf
+from tensorflow.contrib import quantize as contrib_quantize
+from tensorflow.contrib import slim as contrib_slim
from datasets import dataset_factory
from nets import mobilenet_v1
from preprocessing import preprocessing_factory
-slim = tf.contrib.slim
+slim = contrib_slim
flags = tf.app.flags
@@ -124,7 +126,7 @@ def build_model():
        num_classes=FLAGS.num_classes)
  if FLAGS.quantize:
-    tf.contrib.quantize.create_eval_graph()
+    contrib_quantize.create_eval_graph()
  eval_ops = metrics(logits, labels)
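(For context: create_eval_graph() is the inference-side counterpart of contrib_quantize.create_training_graph(); the training script further down calls the latter with a quant_delay so the float model can converge before the fake-quantization ops activate, as its own comment notes.)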
......
@@ -20,10 +20,11 @@ from __future__ import print_function
import numpy as np
import tensorflow as tf
+from tensorflow.contrib import slim as contrib_slim
from nets import mobilenet_v1
-slim = tf.contrib.slim
+slim = contrib_slim
class MobilenetV1Test(tf.test.TestCase):
......
@@ -19,12 +19,14 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
+from tensorflow.contrib import quantize as contrib_quantize
+from tensorflow.contrib import slim as contrib_slim
from datasets import dataset_factory
from nets import mobilenet_v1
from preprocessing import preprocessing_factory
-slim = tf.contrib.slim
+slim = contrib_slim
flags = tf.app.flags
@@ -136,7 +138,7 @@ def build_model():
  # quant_delay delays start of quantization till quant_delay steps, allowing
  # for better model accuracy.
  if FLAGS.quantize:
-    tf.contrib.quantize.create_training_graph(quant_delay=get_quant_delay())
+    contrib_quantize.create_training_graph(quant_delay=get_quant_delay())
  total_loss = tf.losses.get_total_loss(name='total_loss')
  # Configure the learning rate using an exponential decay.
......
@@ -22,11 +22,15 @@ from __future__ import print_function
import copy
import tensorflow as tf
+from tensorflow.contrib import framework as contrib_framework
+from tensorflow.contrib import layers as contrib_layers
+from tensorflow.contrib import slim as contrib_slim
+from tensorflow.contrib import training as contrib_training
from nets.nasnet import nasnet_utils
-arg_scope = tf.contrib.framework.arg_scope
-slim = tf.contrib.slim
+arg_scope = contrib_framework.arg_scope
+slim = contrib_slim
# Notes for training NASNet Cifar Model
@@ -37,7 +41,7 @@ slim = tf.contrib.slim
# auxiliary head loss weighting: 0.4
# clip global norm of all gradients by 5
def cifar_config():
-  return tf.contrib.training.HParams(
+  return contrib_training.HParams(
      stem_multiplier=3.0,
      drop_path_keep_prob=0.6,
      num_cells=18,
@@ -67,7 +71,7 @@ def cifar_config():
# label smoothing: 0.1
# clip global norm of all gradients by 10
def large_imagenet_config():
-  return tf.contrib.training.HParams(
+  return contrib_training.HParams(
      stem_multiplier=3.0,
      dense_dropout_keep_prob=0.5,
      num_cells=18,
@@ -94,7 +98,7 @@ def large_imagenet_config():
# label smoothing: 0.1
# clip global norm of all gradients by 10
def mobile_imagenet_config():
-  return tf.contrib.training.HParams(
+  return contrib_training.HParams(
      stem_multiplier=1.0,
      dense_dropout_keep_prob=0.5,
      num_cells=12,
@@ -138,8 +142,8 @@ def nasnet_cifar_arg_scope(weight_decay=5e-4,
      'scale': True,
      'fused': True,
  }
-  weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
-  weights_initializer = tf.contrib.layers.variance_scaling_initializer(
+  weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
+  weights_initializer = contrib_layers.variance_scaling_initializer(
      mode='FAN_OUT')
  with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
                 weights_regularizer=weights_regularizer,
@@ -174,8 +178,8 @@ def nasnet_mobile_arg_scope(weight_decay=4e-5,
      'scale': True,
      'fused': True,
  }
-  weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
-  weights_initializer = tf.contrib.layers.variance_scaling_initializer(
+  weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
+  weights_initializer = contrib_layers.variance_scaling_initializer(
      mode='FAN_OUT')
  with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
                 weights_regularizer=weights_regularizer,
@@ -210,8 +214,8 @@ def nasnet_large_arg_scope(weight_decay=5e-5,
      'scale': True,
      'fused': True,
  }
-  weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
-  weights_initializer = tf.contrib.layers.variance_scaling_initializer(
+  weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
+  weights_initializer = contrib_layers.variance_scaling_initializer(
      mode='FAN_OUT')
  with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
                 weights_regularizer=weights_regularizer,
@@ -244,7 +248,7 @@ def _build_aux_head(net, end_points, num_classes, hparams, scope):
        aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
        aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
        aux_logits = activation_fn(aux_logits)
-        aux_logits = tf.contrib.layers.flatten(aux_logits)
+        aux_logits = contrib_layers.flatten(aux_logits)
        aux_logits = slim.fully_connected(aux_logits, num_classes)
        end_points['AuxLogits'] = aux_logits
......
@@ -18,10 +18,11 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
+from tensorflow.contrib import slim as contrib_slim
from nets.nasnet import nasnet
-slim = tf.contrib.slim
+slim = contrib_slim
class NASNetTest(tf.test.TestCase):
......
@@ -32,10 +32,11 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
+from tensorflow.contrib import framework as contrib_framework
+from tensorflow.contrib import slim as contrib_slim
-arg_scope = tf.contrib.framework.arg_scope
-slim = tf.contrib.slim
+arg_scope = contrib_framework.arg_scope
+slim = contrib_slim
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
@@ -55,14 +56,14 @@ def calc_reduction_layers(num_cells, num_reduction_layers):
  return reduction_layers
-@tf.contrib.framework.add_arg_scope
+@contrib_framework.add_arg_scope
def get_channel_index(data_format=INVALID):
  assert data_format != INVALID
  axis = 3 if data_format == 'NHWC' else 1
  return axis
-@tf.contrib.framework.add_arg_scope
+@contrib_framework.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
  assert data_format != INVALID
  assert len(shape) == 4
@@ -74,7 +75,7 @@ def get_channel_dim(shape, data_format=INVALID):
    raise ValueError('Not a valid data_format', data_format)
-@tf.contrib.framework.add_arg_scope
+@contrib_framework.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
  """Average pool away the height and width spatial dimensions of x."""
  assert data_format != INVALID
@@ -86,7 +87,7 @@ def global_avg_pool(x, data_format=INVALID):
    return tf.reduce_mean(x, [2, 3])
-@tf.contrib.framework.add_arg_scope
+@contrib_framework.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
  """Reduces the shape of net without information loss due to striding."""
  assert data_format != INVALID
@@ -129,7 +130,7 @@ def factorized_reduction(net, output_filters, stride, data_format=INVALID):
  return final_path
-@tf.contrib.framework.add_arg_scope
+@contrib_framework.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
  """Drops out a whole example hidden state with the specified probability."""
  if is_training:
@@ -422,7 +423,7 @@ class NasNetABaseCell(object):
      net = tf.concat(values=states_to_combine, axis=concat_axis)
      return net
-  @tf.contrib.framework.add_arg_scope  # No public API. For internal use only.
+  @contrib_framework.add_arg_scope  # No public API. For internal use only.
  def _apply_drop_path(self, net, current_step=None,
                       use_summaries=False, drop_connect_version='v3'):
    """Apply drop_path regularization.
......
@@ -23,17 +23,20 @@ from __future__ import print_function
import copy
import tensorflow as tf
+from tensorflow.contrib import framework as contrib_framework
+from tensorflow.contrib import slim as contrib_slim
+from tensorflow.contrib import training as contrib_training
from nets.nasnet import nasnet
from nets.nasnet import nasnet_utils
-arg_scope = tf.contrib.framework.arg_scope
-slim = tf.contrib.slim
+arg_scope = contrib_framework.arg_scope
+slim = contrib_slim
def large_imagenet_config():
  """Large ImageNet configuration based on PNASNet-5."""
-  return tf.contrib.training.HParams(
+  return contrib_training.HParams(
      stem_multiplier=3.0,
      dense_dropout_keep_prob=0.5,
      num_cells=12,
@@ -51,7 +54,7 @@ def large_imagenet_config():
def mobile_imagenet_config():
  """Mobile ImageNet configuration based on PNASNet-5."""
-  return tf.contrib.training.HParams(
+  return contrib_training.HParams(
      stem_multiplier=1.0,
      dense_dropout_keep_prob=0.5,
      num_cells=9,
......
@@ -18,10 +18,11 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
+from tensorflow.contrib import slim as contrib_slim
from nets.nasnet import pnasnet
-slim = tf.contrib.slim
+slim = contrib_slim
class PNASNetTest(tf.test.TestCase):
......
@@ -18,8 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
-import tensorflow as tf
+from tensorflow.contrib import slim as contrib_slim
from nets import alexnet
from nets import cifarnet
@@ -37,7 +36,7 @@ from nets.nasnet import nasnet
from nets.nasnet import pnasnet
-slim = tf.contrib.slim
+slim = contrib_slim
networks_map = {'alexnet_v2': alexnet.alexnet_v2,
                'cifarnet': cifarnet.cifarnet,
......
@@ -32,8 +32,9 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
+from tensorflow.contrib import slim as contrib_slim
-slim = tf.contrib.slim
+slim = contrib_slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
......
@@ -18,10 +18,11 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
+from tensorflow.contrib import slim as contrib_slim
from nets import overfeat
-slim = tf.contrib.slim
+slim = contrib_slim
class OverFeatTest(tf.test.TestCase):
......
@@ -33,8 +33,10 @@ import collections
import functools
import tensorflow as tf
+from tensorflow.contrib import framework as contrib_framework
+from tensorflow.contrib import layers as contrib_layers
-layers = tf.contrib.layers
+layers = contrib_layers
def pix2pix_arg_scope():
@@ -52,7 +54,7 @@ def pix2pix_arg_scope():
      'epsilon': 0.00001,
  }
-  with tf.contrib.framework.arg_scope(
+  with contrib_framework.arg_scope(
      [layers.conv2d, layers.conv2d_transpose],
      normalizer_fn=layers.instance_norm,
      normalizer_params=instance_norm_params,
@@ -165,11 +167,10 @@ def pix2pix_generator(net,
  # Encoder #
  ###########
  with tf.variable_scope('encoder'):
-    with tf.contrib.framework.arg_scope(
-        [layers.conv2d],
-        kernel_size=[4, 4],
-        stride=2,
-        activation_fn=tf.nn.leaky_relu):
+    with contrib_framework.arg_scope([layers.conv2d],
+                                     kernel_size=[4, 4],
+                                     stride=2,
+                                     activation_fn=tf.nn.leaky_relu):
      for block_id, block in enumerate(blocks):
        # No normalizer for the first encoder layers as per 'Image-to-Image',
@@ -196,7 +197,7 @@ def pix2pix_generator(net,
  with tf.variable_scope('decoder'):
    # Dropout is used at both train and test time as per 'Image-to-Image',
    # Section 2.1 (last paragraph).
-    with tf.contrib.framework.arg_scope([layers.dropout], is_training=True):
+    with contrib_framework.arg_scope([layers.dropout], is_training=True):
      for block_id, block in enumerate(reversed_blocks):
        if block_id > 0:
@@ -256,12 +257,11 @@ def pix2pix_discriminator(net, num_filters, padding=2, pad_mode='REFLECT',
    else:
      return net
-  with tf.contrib.framework.arg_scope(
-      [layers.conv2d],
-      kernel_size=[4, 4],
-      stride=2,
-      padding='valid',
-      activation_fn=activation_fn):
+  with contrib_framework.arg_scope([layers.conv2d],
+                                   kernel_size=[4, 4],
+                                   stride=2,
+                                   padding='valid',
+                                   activation_fn=activation_fn):
    # No normalization on the input layer.
    net = layers.conv2d(
......
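The arg_scope rewrites above only reformat the calls; semantically, arg_scope sets call-site defaults that explicit arguments still override. A hedged mini-example of that behavior (the images tensor and filter counts are illustrative):

    with contrib_framework.arg_scope([layers.conv2d],
                                     kernel_size=[4, 4], stride=2):
      net = layers.conv2d(images, 64)          # inherits kernel_size and stride
      net = layers.conv2d(net, 128, stride=1)  # explicit stride wins over the scope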
@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
+from tensorflow.contrib import framework as contrib_framework
from nets import pix2pix
@@ -35,7 +36,7 @@ class GeneratorTest(tf.test.TestCase):
    num_outputs = 4
    images = tf.ones((batch_size, height, width, 3))
-    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
+    with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      logits, _ = pix2pix.pix2pix_generator(
          images, num_outputs, blocks=self._reduced_default_blocks(),
          upsample_method='nn_upsample_conv')
@@ -52,7 +53,7 @@ class GeneratorTest(tf.test.TestCase):
    num_outputs = 4
    images = tf.ones((batch_size, height, width, 3))
-    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
+    with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      logits, _ = pix2pix.pix2pix_generator(
          images, num_outputs, blocks=self._reduced_default_blocks(),
          upsample_method='conv2d_transpose')
@@ -73,7 +74,7 @@ class GeneratorTest(tf.test.TestCase):
        pix2pix.Block(64, 0.5),
        pix2pix.Block(128, 0),
    ]
-    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
+    with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      _, end_points = pix2pix.pix2pix_generator(
          images, num_outputs, blocks)
@@ -105,7 +106,7 @@ class DiscriminatorTest(tf.test.TestCase):
    output_size = self._layer_output_size(output_size, stride=1)
    images = tf.ones((batch_size, input_size, input_size, 3))
-    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
+    with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      logits, end_points = pix2pix.pix2pix_discriminator(
          images, num_filters=[64, 128, 256, 512])
    self.assertListEqual([batch_size, output_size, output_size, 1],
@@ -124,7 +125,7 @@ class DiscriminatorTest(tf.test.TestCase):
    output_size = self._layer_output_size(output_size, stride=1, pad=0)
    images = tf.ones((batch_size, input_size, input_size, 3))
-    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
+    with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      logits, end_points = pix2pix.pix2pix_discriminator(
          images, num_filters=[64, 128, 256, 512], padding=0)
    self.assertListEqual([batch_size, output_size, output_size, 1],
@@ -137,7 +138,7 @@ class DiscriminatorTest(tf.test.TestCase):
    input_size = 256
    images = tf.ones((batch_size, input_size, input_size, 3))
-    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
+    with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      with self.assertRaises(TypeError):
        pix2pix.pix2pix_discriminator(
            images, num_filters=[64, 128, 256, 512], padding=1.5)
@@ -147,7 +148,7 @@ class DiscriminatorTest(tf.test.TestCase):
    input_size = 256
    images = tf.ones((batch_size, input_size, input_size, 3))
-    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
+    with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      with self.assertRaises(ValueError):
        pix2pix.pix2pix_discriminator(
            images, num_filters=[64, 128, 256, 512], padding=-1)
......
@@ -39,8 +39,9 @@ from __future__ import print_function
import collections
import tensorflow as tf
+from tensorflow.contrib import slim as contrib_slim
-slim = tf.contrib.slim
+slim = contrib_slim
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
......
@@ -57,12 +57,13 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
+from tensorflow.contrib import slim as contrib_slim
from nets import resnet_utils
resnet_arg_scope = resnet_utils.resnet_arg_scope
-slim = tf.contrib.slim
+slim = contrib_slim
class NoOpScope(object):
......