Unverified commit 8518d053 authored by pkulzc, committed by GitHub

Open source MnasFPN and minor fixes to OD API (#8484)

310447280  by lzc:

    Internal change

310420845  by Zhichao Lu:

    Open source the internal Context RCNN code.

--
310362339  by Zhichao Lu:

    Internal change

310259448  by lzc:

    Update required TF version for OD API.

--
310252159  by Zhichao Lu:

    Port patch_ops_test to TF1/TF2 and TPUs.

--
310247180  by Zhichao Lu:

    Ignore keypoint heatmap loss in the regions/bounding boxes with target keypoint
    class but no valid keypoint annotations.

--
310178294  by Zhichao Lu:

    Open source MnasFPN
    https://arxiv.org/abs/1912.01106

--
310094222  by lzc:

    Internal changes.

--
310085250  by lzc:

    Internal Change.

--
310016447  by huizhongc:

    Remove unrecognized classes from labeled_classes.

--
310009470  by rathodv:

    Mark batcher.py as TF1 only.

--
310001984  by rathodv:

    Update core/preprocessor.py to be compatible with TF1/TF2.

--
309455035  by Zhi...
parent ac5fff19
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to manipulate feature map pyramids, such as for FPNs and BiFPNs.
Includes utility functions to facilitate feature pyramid map manipulations,
such as combining multiple feature maps, upsampling or downsampling feature
maps, and applying blocks of convolution, batchnorm, and activation layers.
"""
from six.moves import range
import tensorflow as tf
from object_detection.utils import ops
from object_detection.utils import shape_utils
def create_conv_block(name, num_filters, kernel_size, strides, padding,
use_separable, apply_batchnorm, apply_activation,
conv_hyperparams, is_training, freeze_batchnorm):
"""Create Keras layers for regular or separable convolutions.
Args:
name: String. The name of the layer.
num_filters: Number of filters (channels) for the output feature maps.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
filters, or a single int if both values are the same.
strides: A list of length 2: [stride_height, stride_width], specifying the
convolution stride, or a single int if both strides are the same.
padding: One of 'VALID' or 'SAME'.
use_separable: Bool. Whether to use depthwise separable convolution instead
of regular convolution.
apply_batchnorm: Bool. Whether to apply a batch normalization layer after
convolution, constructed according to the conv_hyperparams.
apply_activation: Bool. Whether to apply an activation layer after
convolution, constructed according to the conv_hyperparams.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
Returns:
A list of Keras layers, including (regular or separable) convolution, and
optionally batch normalization and activation layers.
"""
layers = []
if use_separable:
kwargs = conv_hyperparams.params()
# Both the regularizer and initializer apply to the depthwise layer,
# so we remap the kernel_* to depthwise_* here.
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
# TODO(aom): Verify that the pointwise regularizer/initializer should be set
# here, since this is not the case in feature_map_generators.py
kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['pointwise_initializer'] = kwargs['kernel_initializer']
layers.append(
tf.keras.layers.SeparableConv2D(
filters=num_filters,
kernel_size=kernel_size,
depth_multiplier=1,
padding=padding,
strides=strides,
name=name + '_separable_conv',
**kwargs))
else:
layers.append(
tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=kernel_size,
padding=padding,
strides=strides,
name=name + '_conv',
**conv_hyperparams.params()))
if apply_batchnorm:
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=name + '_batchnorm'))
if apply_activation:
layers.append(
conv_hyperparams.build_activation_layer(name=name + '_activation'))
return layers
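A minimal usage sketch (illustrative, not part of the original module): the block is applied by calling its layers in order. Here `conv_hyperparams` and `features` are assumed to come from elsewhere, e.g. a hyperparams_builder.KerasLayerHyperparams built from the pipeline config and a [batch, height, width, channels] float tensor.

block = create_conv_block(
    name='fpn_node_0', num_filters=64, kernel_size=3, strides=1,
    padding='SAME', use_separable=True, apply_batchnorm=True,
    apply_activation=True, conv_hyperparams=conv_hyperparams,
    is_training=True, freeze_batchnorm=False)
net = features
for layer in block:
  net = layer(net)  # conv -> batchnorm -> activation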
def create_downsample_feature_map_ops(scale, downsample_method,
conv_hyperparams, is_training,
freeze_batchnorm, name):
"""Creates Keras layers for downsampling feature maps.
Args:
scale: Int. The scale factor by which to downsample input feature maps. For
example, in the case of a typical feature map pyramid, the scale factor
between level_i and level_i+1 is 2.
downsample_method: String. The method used for downsampling. Currently
supported methods include 'max_pooling', 'avg_pooling', and
'depthwise_conv'.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will downsample input feature maps by the
desired scale factor.
"""
layers = []
padding = 'SAME'
stride = int(scale)
kernel_size = stride + 1
if downsample_method == 'max_pooling':
layers.append(
tf.keras.layers.MaxPooling2D(
pool_size=kernel_size,
strides=stride,
padding=padding,
name=name + '_downsample_max_x{}'.format(stride)))
elif downsample_method == 'avg_pooling':
layers.append(
tf.keras.layers.AveragePooling2D(
pool_size=kernel_size,
strides=stride,
padding=padding,
name=name + '_downsample_avg_x{}'.format(stride)))
elif downsample_method == 'depthwise_conv':
layers.append(
tf.keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=stride,
padding=padding,
name=name + '_downsample_depthwise_x{}'.format(stride)))
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=name + '_downsample_batchnorm'))
layers.append(
conv_hyperparams.build_activation_layer(name=name +
'_downsample_activation'))
else:
raise ValueError('Unknown downsample method: {}'.format(downsample_method))
return layers
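For example (a hedged sketch with the same assumed `conv_hyperparams` and `features` as above), downsampling a level by a factor of 2 with max pooling uses stride 2 and a 3x3 pooling window, per the kernel_size = stride + 1 rule in the code:

downsample_ops = create_downsample_feature_map_ops(
    scale=2, downsample_method='max_pooling',
    conv_hyperparams=conv_hyperparams, is_training=True,
    freeze_batchnorm=False, name='level_3_to_level_4')
net = features  # e.g. [batch, 32, 32, channels]
for layer in downsample_ops:
  net = layer(net)  # -> [batch, 16, 16, channels] with 'SAME' padding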
def create_upsample_feature_map_ops(scale, use_native_resize_op, name):
"""Creates Keras layers for upsampling feature maps.
Args:
scale: Int. The scale factor by which to upsample input feature maps. For
example, in the case of a typical feature map pyramid, the scale factor
between level_i and level_i-1 is 2.
use_native_resize_op: If True, uses the tf.image.resize_nearest_neighbor op
for the upsampling process instead of the reshape-and-broadcast implementation.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will upsample input feature maps by the
desired scale factor.
"""
layers = []
if use_native_resize_op:
def resize_nearest_neighbor(image):
image_shape = shape_utils.combined_static_and_dynamic_shape(image)
return tf.image.resize_nearest_neighbor(
image, [image_shape[1] * scale, image_shape[2] * scale])
layers.append(
tf.keras.layers.Lambda(
resize_nearest_neighbor,
name=name + 'nearest_neighbor_upsampling_x{}'.format(scale)))
else:
def nearest_neighbor_upsampling(image):
return ops.nearest_neighbor_upsampling(image, scale=scale)
layers.append(
tf.keras.layers.Lambda(
nearest_neighbor_upsampling,
name=name + 'nearest_neighbor_upsampling_x{}'.format(scale)))
return layers
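A corresponding upsampling sketch (illustrative only): with use_native_resize_op=False the single returned layer wraps ops.nearest_neighbor_upsampling, otherwise it wraps tf.image.resize_nearest_neighbor.

upsample_ops = create_upsample_feature_map_ops(
    scale=2, use_native_resize_op=False, name='level_4_to_level_3')
net = features  # e.g. [batch, 16, 16, channels]
for layer in upsample_ops:
  net = layer(net)  # -> [batch, 32, 32, channels]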
def create_resample_feature_map_ops(input_scale_factor, output_scale_factor,
downsample_method, use_native_resize_op,
conv_hyperparams, is_training,
freeze_batchnorm, name):
"""Creates Keras layers for downsampling or upsampling feature maps.
Args:
input_scale_factor: Int. Scale factor of the input feature map. For example,
for a feature pyramid where each successive level halves its spatial
resolution, the scale factor of a level is 2^level. The input and output
scale factors are used to compute the scale for upsampling or downsampling,
so they should be evenly divisible.
output_scale_factor: Int. Scale factor of the output feature map. See
input_scale_factor for additional details.
downsample_method: String. The method used for downsampling. See
create_downsample_feature_map_ops for details on supported methods.
use_native_resize_op: If True, uses the tf.image.resize_nearest_neighbor op
for the upsampling process instead of the reshape-and-broadcast implementation.
See create_upsample_feature_map_ops for details.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will downsample or upsample input feature maps
to match the desired output feature map scale.
"""
if input_scale_factor < output_scale_factor:
if output_scale_factor % input_scale_factor != 0:
raise ValueError('Invalid scale factor: input scale 1/{} not divisible by '
'output scale 1/{}'.format(input_scale_factor,
output_scale_factor))
scale = output_scale_factor // input_scale_factor
return create_downsample_feature_map_ops(scale, downsample_method,
conv_hyperparams, is_training,
freeze_batchnorm, name)
elif input_scale_factor > output_scale_factor:
if input_scale_factor % output_scale_factor != 0:
raise ValueError('Invalid scale factor: input scale 1/{} not a divisor of '
'output scale 1/{}'.format(input_scale_factor,
output_scale_factor))
scale = input_scale_factor // output_scale_factor
return create_upsample_feature_map_ops(scale, use_native_resize_op, name)
else:
return []
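Putting the two together, a hedged sketch of resampling between pyramid levels, where level i has scale factor 2**i: resampling from level 3 (scale 8) to level 5 (scale 32) returns downsampling layers with scale 32 // 8 = 4, and swapping the two scale factors would return upsampling layers instead.

resample_ops = create_resample_feature_map_ops(
    input_scale_factor=8, output_scale_factor=32,
    downsample_method='max_pooling', use_native_resize_op=False,
    conv_hyperparams=conv_hyperparams, is_training=True,
    freeze_batchnorm=False, name='level_3_to_level_5')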
# TODO(aom): Add tests for this module in a followup CL.
class BiFPNCombineLayer(tf.keras.layers.Layer):
"""Combines multiple input feature maps into a single output feature map.
A Keras layer which combines multiple input feature maps into a single output
feature map, according to the desired combination method. Options for
combining feature maps include simple summation, or several types of weighted
sums using learned weights for each input feature map. These include
'weighted_sum', 'attention', and 'fast_attention'. For more details, see the
EfficientDet paper by Tan et al. (arxiv.org/abs/1911.09070).
Specifically, this layer takes a list of tensors as input, all of the same
shape, and returns a single tensor, also of the same shape.
"""
def __init__(self, combine_method, **kwargs):
"""Constructor.
Args:
combine_method: String. The method used to combine the input feature maps
into a single output feature map. One of 'sum', 'weighted_sum',
'attention', or 'fast_attention'.
**kwargs: Additional Keras layer arguments.
"""
super(BiFPNCombineLayer, self).__init__(**kwargs)
self.combine_method = combine_method
def _combine_weighted_sum(self, inputs):
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), self.per_input_weights),
axis=[-1])
def _combine_attention(self, inputs):
normalized_weights = tf.nn.softmax(self.per_input_weights)
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), normalized_weights),
axis=[-1])
def _combine_fast_attention(self, inputs):
weights_non_neg = tf.nn.relu(self.per_input_weights)
normalizer = tf.reduce_sum(weights_non_neg) + 0.0001
normalized_weights = weights_non_neg / normalizer
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), normalized_weights),
axis=[-1])
def build(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError('A BiFPN combine layer should be called '
'on a list of inputs.')
if len(input_shape) < 2:
raise ValueError('A BiFPN combine layer should be called '
'on a list of at least 2 inputs. '
'Got ' + str(len(input_shape)) + ' inputs.')
if self.combine_method == 'sum':
self._combine_op = tf.keras.layers.Add()
elif self.combine_method == 'weighted_sum':
self._combine_op = self._combine_weighted_sum
elif self.combine_method == 'attention':
self._combine_op = self._combine_attention
elif self.combine_method == 'fast_attention':
self._combine_op = self._combine_fast_attention
else:
raise ValueError('Unknown combine type: {}'.format(self.combine_method))
if self.combine_method in {'weighted_sum', 'attention', 'fast_attention'}:
self.per_input_weights = self.add_weight(
name='bifpn_combine_weights',
shape=(len(input_shape), 1),
initializer='ones',
trainable=True)
super(BiFPNCombineLayer, self).build(input_shape)
def call(self, inputs):
"""Combines multiple input feature maps into a single output feature map.
Executed when calling the `.__call__` method on input.
Args:
inputs: A list of tensors where all tensors have the same shape, [batch,
height_i, width_i, depth_i].
Returns:
A single tensor, with the same shape as the input tensors,
[batch, height_i, width_i, depth_i].
"""
return self._combine_op(inputs)
def compute_output_shape(self, input_shape):
output_shape = input_shape[0]
for i in range(1, len(input_shape)):
if input_shape[i] != output_shape:
raise ValueError(
'Inputs could not be combined. Shapes should match, '
'but input_shape[0] is {} while input_shape[{}] is {}'.format(
output_shape, i, input_shape[i]))
......@@ -46,12 +46,13 @@ def get_image_resizer_config(model_config):
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
return model_config.faster_rcnn.image_resizer
if meta_architecture == "ssd":
return model_config.ssd.image_resizer
meta_architecture_config = getattr(model_config, meta_architecture)
raise ValueError("Unknown model type: {}".format(meta_architecture))
if hasattr(meta_architecture_config, "image_resizer"):
return getattr(meta_architecture_config, "image_resizer")
else:
raise ValueError("{} has no image_reszier_config".format(
meta_architecture))
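The rewrite above replaces the per-meta-architecture branches with a generic lookup on the `model` oneof. A minimal sketch of that lookup (assuming a DetectionModel proto from object_detection.protos.model_pb2):

from object_detection.protos import model_pb2

model_config = model_pb2.DetectionModel()
model_config.ssd.image_resizer.fixed_shape_resizer.height = 320
model_config.ssd.image_resizer.fixed_shape_resizer.width = 320
meta_architecture = model_config.WhichOneof("model")  # "ssd"
resizer_config = getattr(model_config, meta_architecture).image_resizer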
def get_spatial_image_size(image_resizer_config):
......@@ -84,6 +85,40 @@ def get_spatial_image_size(image_resizer_config):
raise ValueError("Unknown image resizer type.")
def get_max_num_context_features(model_config):
"""Returns maximum number of context features from a given config.
Args:
model_config: A model config file.
Returns:
An integer specifying the max number of context features if the model
config contains context_config, None otherwise
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "context_config"):
return meta_architecture_config.context_config.max_num_context_features
def get_context_feature_length(model_config):
"""Returns context feature length from a given config.
Args:
model_config: A model config file.
Returns:
An integer specifying the fixed length of each feature in context_features.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "context_config"):
return meta_architecture_config.context_config.context_feature_length
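Both context getters follow the same pattern and implicitly return None when the chosen meta-architecture has no context_config. A small sketch, mirroring the unit tests further below and reusing model_pb2 as in the sketch above:

model_config = model_pb2.DetectionModel()
model_config.faster_rcnn.context_config.max_num_context_features = 10
model_config.faster_rcnn.context_config.context_feature_length = 100
assert get_max_num_context_features(model_config) == 10
assert get_context_feature_length(model_config) == 100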
def get_configs_from_pipeline_file(pipeline_config_path, config_override=None):
"""Reads config from a file containing pipeline_pb2.TrainEvalPipelineConfig.
......@@ -263,12 +298,12 @@ def get_number_of_classes(model_config):
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
return model_config.faster_rcnn.num_classes
if meta_architecture == "ssd":
return model_config.ssd.num_classes
meta_architecture_config = getattr(model_config, meta_architecture)
raise ValueError("Expected the model to be one of 'faster_rcnn' or 'ssd'.")
if hasattr(meta_architecture_config, "num_classes"):
return meta_architecture_config.num_classes
else:
raise ValueError("{} does not have num_classes.".format(meta_architecture))
def get_optimizer_type(train_config):
......@@ -555,6 +590,8 @@ def _maybe_update_config_with_key_value(configs, key, value):
elif field_name == "retain_original_image_additional_channels_in_eval":
_update_retain_original_image_additional_channels(configs["eval_config"],
value)
elif field_name == "num_classes":
_update_num_classes(configs["model"], value)
else:
return False
return True
......@@ -885,6 +922,8 @@ def _update_label_map_path(configs, label_map_path):
_update_all_eval_input_configs(configs, "label_map_path", label_map_path)
def _update_mask_type(configs, mask_type):
"""Updates the mask type for both train and eval input readers.
......@@ -997,3 +1036,11 @@ def remove_unecessary_ema(variables_to_restore, no_ema_collection=None):
"")] = variables_to_restore[key]
del variables_to_restore[key]
return variables_to_restore
def _update_num_classes(model_config, num_classes):
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
model_config.faster_rcnn.num_classes = num_classes
if meta_architecture == "ssd":
model_config.ssd.num_classes = num_classes
......@@ -33,6 +33,14 @@ from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
from object_detection.utils import config_util
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import training as contrib_training
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
def _write_config(config, config_path):
"""Writes a config object to disk."""
......@@ -209,7 +217,7 @@ class ConfigUtilTest(tf.test.TestCase):
original_learning_rate = 0.7
learning_rate_scaling = 0.1
warmup_learning_rate = 0.07
hparams = tf.contrib.training.HParams(learning_rate=0.15)
hparams = contrib_training.HParams(learning_rate=0.15)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
# Constant learning rate.
......@@ -302,7 +310,7 @@ class ConfigUtilTest(tf.test.TestCase):
# Override each of the parameters:
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
hparams = tf.contrib.training.HParams(
hparams = contrib_training.HParams(
**{
"model.ssd.num_classes": 2,
"train_config.batch_size": 2,
......@@ -324,7 +332,7 @@ class ConfigUtilTest(tf.test.TestCase):
def testNewBatchSize(self):
"""Tests that batch size is updated appropriately."""
original_batch_size = 2
hparams = tf.contrib.training.HParams(batch_size=16)
hparams = contrib_training.HParams(batch_size=16)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
......@@ -339,7 +347,7 @@ class ConfigUtilTest(tf.test.TestCase):
def testNewBatchSizeWithClipping(self):
"""Tests that batch size is clipped to 1 from below."""
original_batch_size = 2
hparams = tf.contrib.training.HParams(batch_size=0.5)
hparams = contrib_training.HParams(batch_size=0.5)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
......@@ -356,7 +364,7 @@ class ConfigUtilTest(tf.test.TestCase):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.batch_size = 2
configs = self._create_and_load_test_configs(pipeline_config)
hparams = tf.contrib.training.HParams(**{"train_config.batch_size": 10})
hparams = contrib_training.HParams(**{"train_config.batch_size": 10})
configs = config_util.merge_external_params_with_configs(configs, hparams)
new_batch_size = configs["train_config"].batch_size
self.assertEqual(10, new_batch_size)
......@@ -365,7 +373,7 @@ class ConfigUtilTest(tf.test.TestCase):
"""Tests that overwriting with a bad key causes an exception."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
configs = self._create_and_load_test_configs(pipeline_config)
hparams = tf.contrib.training.HParams(**{"train_config.no_such_field": 10})
hparams = contrib_training.HParams(**{"train_config.no_such_field": 10})
with self.assertRaises(ValueError):
config_util.merge_external_params_with_configs(configs, hparams)
......@@ -375,14 +383,14 @@ class ConfigUtilTest(tf.test.TestCase):
pipeline_config.train_config.batch_size = 2
configs = self._create_and_load_test_configs(pipeline_config)
# Type should be an integer, but we're passing a string "10".
hparams = tf.contrib.training.HParams(**{"train_config.batch_size": "10"})
hparams = contrib_training.HParams(**{"train_config.batch_size": "10"})
with self.assertRaises(TypeError):
config_util.merge_external_params_with_configs(configs, hparams)
def testNewMomentumOptimizerValue(self):
"""Tests that new momentum value is updated appropriately."""
original_momentum_value = 0.4
hparams = tf.contrib.training.HParams(momentum_optimizer_value=1.1)
hparams = contrib_training.HParams(momentum_optimizer_value=1.1)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
......@@ -401,7 +409,7 @@ class ConfigUtilTest(tf.test.TestCase):
original_localization_weight = 0.1
original_classification_weight = 0.2
new_weight_ratio = 5.0
hparams = tf.contrib.training.HParams(
hparams = contrib_training.HParams(
classification_localization_weight_ratio=new_weight_ratio)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
......@@ -424,7 +432,7 @@ class ConfigUtilTest(tf.test.TestCase):
original_gamma = 1.0
new_alpha = 0.3
new_gamma = 2.0
hparams = tf.contrib.training.HParams(
hparams = contrib_training.HParams(
focal_loss_alpha=new_alpha, focal_loss_gamma=new_gamma)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
......@@ -623,6 +631,20 @@ class ConfigUtilTest(tf.test.TestCase):
image_shape = config_util.get_spatial_image_size(image_resizer_config)
self.assertAllEqual(image_shape, [-1, -1])
def testGetMaxNumContextFeaturesFromModelConfig(self):
model_config = model_pb2.DetectionModel()
model_config.faster_rcnn.context_config.max_num_context_features = 10
max_num_context_features = config_util.get_max_num_context_features(
model_config)
self.assertAllEqual(max_num_context_features, 10)
def testGetContextFeatureLengthFromModelConfig(self):
model_config = model_pb2.DetectionModel()
model_config.faster_rcnn.context_config.context_feature_length = 100
context_feature_length = config_util.get_context_feature_length(
model_config)
self.assertAllEqual(context_feature_length, 100)
def testEvalShuffle(self):
"""Tests that `eval_shuffle` keyword arguments are applied correctly."""
original_shuffle = True
......@@ -895,6 +917,22 @@ class ConfigUtilTest(tf.test.TestCase):
self.assertEqual(desired_retain_original_image_additional_channels,
retain_original_image_additional_channels)
def testUpdateNumClasses(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
self.assertEqual(config_util.get_number_of_classes(configs["model"]), 10)
config_util.merge_external_params_with_configs(
configs, kwargs_dict={"num_classes": 2})
self.assertEqual(config_util.get_number_of_classes(configs["model"]), 2)
def testRemoveUnecessaryEma(self):
input_dict = {
"expanded_conv_10/project/act_quant/min":
......
......@@ -18,7 +18,15 @@ json_utils wraps json.dump and json.dumps so that they can be used to safely
control the precision of floats when writing to json strings or files.
"""
import json
from json import encoder
import re
def FormatFloat(json_str, float_digits):
pattern = re.compile(r'\d+\.\d+')
float_repr = '{:.' + '{}'.format(float_digits) + 'f}'
def MRound(match):
return float_repr.format(float(match.group()))
return re.sub(pattern, MRound, json_str)
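FormatFloat rounds floats by post-processing the serialized JSON string (every digits.digits run is reformatted), rather than by patching json.encoder internals as the previous implementation did. A quick illustrative sketch:

json_str = json.dumps({'score': 0.123456, 'count': 3})
rounded = FormatFloat(json_str, 2)
# rounded == '{"score": 0.12, "count": 3}'
# Note that the regex also matches digit runs inside quoted string values.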
def Dump(obj, fid, float_digits=-1, **params):
......@@ -30,13 +38,8 @@ def Dump(obj, fid, float_digits=-1, **params):
float_digits: The number of digits of precision when writing floats out.
**params: Additional parameters to pass to json.dumps.
"""
original_encoder = encoder.FLOAT_REPR
if float_digits >= 0:
encoder.FLOAT_REPR = lambda o: format(o, '.%df' % float_digits)
try:
json.dump(obj, fid, **params)
finally:
encoder.FLOAT_REPR = original_encoder
json_str = Dumps(obj, float_digits, **params)
fid.write(json_str)
def Dumps(obj, float_digits=-1, **params):
......@@ -50,18 +53,10 @@ def Dumps(obj, float_digits=-1, **params):
Returns:
output: JSON string representation of obj.
"""
original_encoder = encoder.FLOAT_REPR
original_c_make_encoder = encoder.c_make_encoder
if float_digits >= 0:
encoder.FLOAT_REPR = lambda o: format(o, '.%df' % float_digits)
encoder.c_make_encoder = None
try:
output = json.dumps(obj, **params)
finally:
encoder.FLOAT_REPR = original_encoder
encoder.c_make_encoder = original_c_make_encoder
return output
json_str = json.dumps(obj, **params)
if float_digits > -1:
json_str = FormatFloat(json_str, float_digits)
return json_str
def PrettyParams(**params):
......
......@@ -32,9 +32,9 @@ class JsonUtilsTest(tf.test.TestCase):
def testDumpPassExtraParams(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump([1.0], f, float_digits=2, indent=3)
json_utils.Dump([1.12345], f, float_digits=2, indent=3)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '[\n 1.00\n]')
self.assertEqual(f.read(), '[\n 1.12\n]')
def testDumpZeroPrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
......@@ -51,8 +51,8 @@ class JsonUtilsTest(tf.test.TestCase):
self.assertEqual(f.read(), '1.012345')
def testDumpsReasonablePrecision(self):
s = json_utils.Dumps(1.0, float_digits=2)
self.assertEqual(s, '1.00')
s = json_utils.Dumps(1.12545, float_digits=2)
self.assertEqual(s, '1.13')
def testDumpsPassExtraParams(self):
s = json_utils.Dumps([1.0], float_digits=2, indent=3)
......
......@@ -85,6 +85,8 @@ def convert_label_map_to_categories(label_map,
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
'keypoints': (optional) a dictionary of keypoint string 'label' to integer
'id'.
We only allow a class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
......@@ -123,7 +125,18 @@ def convert_label_map_to_categories(label_map,
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
categories.append({'id': item.id, 'name': name})
category = {'id': item.id, 'name': name}
if item.keypoints:
keypoints = {}
list_of_keypoint_ids = []
for kv in item.keypoints:
if kv.id in list_of_keypoint_ids:
raise ValueError('Duplicate keypoint ids are not allowed. '
'Found {} more than once'.format(kv.id))
keypoints[kv.label] = kv.id
list_of_keypoint_ids.append(kv.id)
category['keypoints'] = keypoints
categories.append(category)
return categories
......@@ -135,7 +148,7 @@ def load_labelmap(path):
Returns:
a StringIntLabelMapProto
"""
with tf.gfile.GFile(path, 'r') as fid:
with tf.io.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
......@@ -210,6 +223,8 @@ def create_categories_from_labelmap(label_map_path, use_display_name=True):
which has the following keys:
'id': an integer id uniquely identifying this category.
'name': string representing category name e.g., 'cat', 'dog'.
'keypoints': a dictionary of keypoint string label to integer id. It is only
returned when available in label map proto.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
......
......@@ -226,6 +226,59 @@ class LabelMapUtilTest(tf.test.TestCase):
}]
self.assertListEqual(expected_categories_list, categories)
def test_convert_label_map_with_keypoints_to_categories(self):
label_map_str = """
item {
id: 1
name: 'person'
keypoints: {
id: 1
label: 'nose'
}
keypoints: {
id: 2
label: 'ear'
}
}
"""
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
text_format.Merge(label_map_str, label_map_proto)
categories = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=1)
self.assertEqual('person', categories[0]['name'])
self.assertEqual(1, categories[0]['id'])
self.assertEqual(1, categories[0]['keypoints']['nose'])
self.assertEqual(2, categories[0]['keypoints']['ear'])
def test_disallow_duplicate_keypoint_ids(self):
label_map_str = """
item {
id: 1
name: 'person'
keypoints: {
id: 1
label: 'right_elbow'
}
keypoints: {
id: 1
label: 'left_elbow'
}
}
item {
id: 2
name: 'face'
keypoints: {
id: 3
label: 'ear'
}
}
"""
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
text_format.Merge(label_map_str, label_map_proto)
with self.assertRaises(ValueError):
label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=2)
def test_convert_label_map_to_categories_with_few_classes(self):
label_map_proto = self._generate_label_map(num_classes=4)
cat_no_offset = label_map_util.convert_label_map_to_categories(
......
......@@ -54,8 +54,8 @@ def extract_submodel(model, inputs, outputs, name=None):
for layer in model.layers:
layer_output = layer.output
layer_inputs = layer.input
output_to_layer[layer_output] = layer
output_to_layer_input[layer_output] = layer_inputs
output_to_layer[layer_output.ref()] = layer
output_to_layer_input[layer_output.ref()] = layer_inputs
model_inputs_dict = {}
memoized_results = {}
......@@ -63,20 +63,21 @@ def extract_submodel(model, inputs, outputs, name=None):
# Relies on recursion, very low limit in python
def _recurse_in_model(tensor):
"""Walk the existing model recursively to copy a submodel."""
if tensor in memoized_results:
return memoized_results[tensor]
if (tensor == inputs) or (isinstance(inputs, list) and tensor in inputs):
if tensor not in model_inputs_dict:
model_inputs_dict[tensor] = tf.keras.layers.Input(tensor=tensor)
out = model_inputs_dict[tensor]
if tensor.ref() in memoized_results:
return memoized_results[tensor.ref()]
if (tensor.ref() == inputs.ref()) or (
isinstance(inputs, list) and tensor in inputs):
if tensor.ref() not in model_inputs_dict:
model_inputs_dict[tensor.ref()] = tf.keras.layers.Input(tensor=tensor)
out = model_inputs_dict[tensor.ref()]
else:
cur_inputs = output_to_layer_input[tensor]
cur_layer = output_to_layer[tensor]
cur_inputs = output_to_layer_input[tensor.ref()]
cur_layer = output_to_layer[tensor.ref()]
if isinstance(cur_inputs, list):
out = cur_layer([_recurse_in_model(inp) for inp in cur_inputs])
else:
out = cur_layer(_recurse_in_model(cur_inputs))
memoized_results[tensor] = out
memoized_results[tensor.ref()] = out
return out
if isinstance(outputs, list):
......@@ -85,8 +86,8 @@ def extract_submodel(model, inputs, outputs, name=None):
model_outputs = _recurse_in_model(outputs)
if isinstance(inputs, list):
model_inputs = [model_inputs_dict[tensor] for tensor in inputs]
model_inputs = [model_inputs_dict[tensor.ref()] for tensor in inputs]
else:
model_inputs = model_inputs_dict[inputs]
model_inputs = model_inputs_dict[inputs.ref()]
return tf.keras.Model(inputs=model_inputs, outputs=model_outputs, name=name)
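The switch to tensor.ref() above is needed because tf.Tensor objects are not hashable in TF 2.x and can no longer be used directly as dict keys; Tensor.ref() returns a hashable reference object that can be turned back into the tensor with .deref(). A minimal sketch:

x = tf.constant([1.0])
by_ref = {x.ref(): 'some_layer'}  # Keying by x itself raises TypeError in TF 2.x.
assert by_ref[x.ref()] == 'some_layer'
assert x.ref().deref() is x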
......@@ -31,6 +31,13 @@ from object_detection.utils import shape_utils
from object_detection.utils import spatial_transform_ops as spatial_ops
from object_detection.utils import static_shape
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import framework as contrib_framework
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
matmul_crop_and_resize = spatial_ops.matmul_crop_and_resize
multilevel_roi_align = spatial_ops.multilevel_roi_align
......@@ -588,8 +595,9 @@ def normalize_to_target(inputs,
initial_norm = depth * [target_norm_value]
else:
initial_norm = target_norm_value
target_norm = tf.contrib.framework.model_variable(
name='weights', dtype=tf.float32,
target_norm = contrib_framework.model_variable(
name='weights',
dtype=tf.float32,
initializer=tf.constant(initial_norm, dtype=tf.float32),
trainable=trainable)
if summarize:
......
......@@ -27,7 +27,14 @@ from object_detection.core import standard_fields as fields
from object_detection.utils import ops
from object_detection.utils import test_case
slim = tf.contrib.slim
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
class NormalizedToImageCoordinatesTest(tf.test.TestCase):
......@@ -760,7 +767,7 @@ class OpsTestNormalizeToTarget(tf.test.TestCase):
with self.test_session():
output = ops.normalize_to_target(inputs, target_norm_value, dim)
self.assertEqual(output.op.name, 'NormalizeToTarget/mul')
var_name = tf.contrib.framework.get_variables()[0].name
var_name = contrib_framework.get_variables()[0].name
self.assertEqual(var_name, 'NormalizeToTarget/weights:0')
def test_invalid_dim(self):
......
......@@ -24,9 +24,10 @@ import numpy as np
import tensorflow as tf
from object_detection.utils import patch_ops
from object_detection.utils import test_case
class GetPatchMaskTest(tf.test.TestCase, parameterized.TestCase):
class GetPatchMaskTest(test_case.TestCase, parameterized.TestCase):
def testMaskShape(self):
image_shape = [15, 10]
......@@ -108,13 +109,17 @@ class GetPatchMaskTest(tf.test.TestCase, parameterized.TestCase):
patch_ops.get_patch_mask(y, x, patch_size=3, image_shape=image_shape)
def testDynamicCoordinatesOutsideImageRaisesError(self):
image_shape = [15, 10]
x = tf.random_uniform([], minval=-2, maxval=-1, dtype=tf.int32)
y = tf.random_uniform([], minval=0, maxval=1, dtype=tf.int32)
mask = patch_ops.get_patch_mask(
y, x, patch_size=3, image_shape=image_shape)
def graph_fn():
image_shape = [15, 10]
x = tf.random_uniform([], minval=-2, maxval=-1, dtype=tf.int32)
y = tf.random_uniform([], minval=0, maxval=1, dtype=tf.int32)
mask = patch_ops.get_patch_mask(
y, x, patch_size=3, image_shape=image_shape)
return mask
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(mask)
self.execute(graph_fn, [])
@parameterized.parameters(
{'patch_size': 0},
......@@ -127,12 +132,17 @@ class GetPatchMaskTest(tf.test.TestCase, parameterized.TestCase):
0, 0, patch_size=patch_size, image_shape=image_shape)
def testDynamicNonPositivePatchSizeRaisesError(self):
image_shape = [6, 7]
patch_size = -1 * tf.random_uniform([], minval=0, maxval=3, dtype=tf.int32)
mask = patch_ops.get_patch_mask(
0, 0, patch_size=patch_size, image_shape=image_shape)
def graph_fn():
image_shape = [6, 7]
patch_size = -1 * tf.random_uniform([], minval=0, maxval=3,
dtype=tf.int32)
mask = patch_ops.get_patch_mask(
0, 0, patch_size=patch_size, image_shape=image_shape)
return mask
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(mask)
self.execute(graph_fn, [])
if __name__ == '__main__':
......
......@@ -24,6 +24,14 @@ import tensorflow as tf
from object_detection.utils import shape_utils
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import framework as contrib_framework
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
class UtilTest(tf.test.TestCase):
......@@ -124,7 +132,7 @@ class UtilTest(tf.test.TestCase):
tensor = tf.placeholder(tf.float32, shape=(None, 2, 3))
combined_shape = shape_utils.combined_static_and_dynamic_shape(
tensor)
self.assertTrue(tf.contrib.framework.is_tensor(combined_shape[0]))
self.assertTrue(contrib_framework.is_tensor(combined_shape[0]))
self.assertListEqual(combined_shape[1:], [2, 3])
def test_pad_or_clip_nd_tensor(self):
......
......@@ -217,11 +217,20 @@ def pad_to_max_size(features):
true_feature_shapes: A 2D int32 tensor of shape [num_levels, 2] containing
height and width of the feature maps before padding.
"""
heights = [tf.shape(feature)[1] for feature in features]
widths = [tf.shape(feature)[2] for feature in features]
max_height = tf.reduce_max(heights)
max_width = tf.reduce_max(widths)
if len(features) == 1:
return tf.expand_dims(features[0],
1), tf.expand_dims(tf.shape(features[0])[1:3], 0)
if all([feature.shape.is_fully_defined() for feature in features]):
heights = [feature.shape[1] for feature in features]
widths = [feature.shape[2] for feature in features]
max_height = max(heights)
max_width = max(widths)
else:
heights = [tf.shape(feature)[1] for feature in features]
widths = [tf.shape(feature)[2] for feature in features]
max_height = tf.reduce_max(heights)
max_width = tf.reduce_max(widths)
features_all = [
tf.image.pad_to_bounding_box(feature, 0, 0, max_height,
max_width) for feature in features
......@@ -405,7 +414,7 @@ def multilevel_roi_align(features, boxes, box_levels, output_size,
def native_crop_and_resize(image, boxes, crop_size, scope=None):
"""Same as `matmul_crop_and_resize` but uses tf.image.crop_and_resize."""
def get_box_inds(proposals):
proposals_shape = proposals.get_shape().as_list()
proposals_shape = proposals.shape.as_list()
if any(dim is None for dim in proposals_shape):
proposals_shape = tf.shape(proposals)
ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
......
......@@ -360,7 +360,7 @@ class MultiLevelRoIAlignTest(test_case.TestCase):
self.assertAllClose(roi_features[0][4], 5 * np.ones((2, 2, 1)))
def test_large_input(self):
if test_case.FLAGS.tpu_test:
if self.has_tpu():
input_size = 1408
min_level = 2
max_level = 6
......@@ -368,36 +368,31 @@ class MultiLevelRoIAlignTest(test_case.TestCase):
num_boxes = 512
num_filters = 256
output_size = [7, 7]
with self.test_session() as sess:
features = []
for level in range(min_level, max_level + 1):
feat_size = int(input_size / 2**level)
features.append(tf.constant(
np.reshape(
np.arange(
batch_size * feat_size * feat_size * num_filters,
dtype=np.float32),
[batch_size, feat_size, feat_size, num_filters]),
dtype=tf.bfloat16))
boxes = np.array([
[[0, 0, 256, 256]]*num_boxes,
], dtype=np.float32) / input_size
boxes = np.tile(boxes, [batch_size, 1, 1])
tf_boxes = tf.constant(boxes)
tf_levels = tf.random_uniform([batch_size, num_boxes], maxval=5,
dtype=tf.int32)
def crop_and_resize_fn():
return spatial_ops.multilevel_roi_align(
features, tf_boxes, tf_levels, output_size)
tpu_crop_and_resize_fn = tf.contrib.tpu.rewrite(crop_and_resize_fn)
sess.run(tf.contrib.tpu.initialize_system())
sess.run(tf.global_variables_initializer())
roi_features = sess.run(tpu_crop_and_resize_fn)
self.assertEqual(roi_features[0].shape,
(batch_size, num_boxes, output_size[0], output_size[1],
num_filters))
sess.run(tf.contrib.tpu.shutdown_system())
features = []
for level in range(min_level, max_level + 1):
feat_size = int(input_size / 2**level)
features.append(
np.reshape(
np.arange(
batch_size * feat_size * feat_size * num_filters,
dtype=np.float32),
[batch_size, feat_size, feat_size, num_filters]))
boxes = np.array([
[[0, 0, 256, 256]]*num_boxes,
], dtype=np.float32) / input_size
boxes = np.tile(boxes, [batch_size, 1, 1])
levels = np.random.randint(5, size=[batch_size, num_boxes],
dtype=np.int32)
def crop_and_resize_fn():
tf_features = [
tf.constant(feature, dtype=tf.bfloat16) for feature in features
]
return spatial_ops.multilevel_roi_align(
tf_features, tf.constant(boxes), tf.constant(levels), output_size)
roi_features = self.execute_tpu(crop_and_resize_fn, [])
self.assertEqual(roi_features.shape,
(batch_size, num_boxes, output_size[0],
output_size[1], num_filters))
class MatMulCropAndResizeTest(test_case.TestCase):
......@@ -517,13 +512,6 @@ class MatMulCropAndResizeTest(test_case.TestCase):
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testInvalidInputShape(self):
image = tf.constant([[[1], [2]], [[3], [4]]], dtype=tf.float32)
boxes = tf.constant([[-1, -1, 1, 1]], dtype=tf.float32)
crop_size = [4, 4]
with self.assertRaises(ValueError):
spatial_ops.matmul_crop_and_resize(image, boxes, crop_size)
class NativeCropAndResizeTest(test_case.TestCase):
......
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions used by target assigner."""
import tensorflow as tf
from object_detection.utils import shape_utils
def image_shape_to_grids(height, width):
"""Computes xy-grids given the shape of the image.
Args:
height: The height of the image.
width: The width of the image.
Returns:
A tuple of two tensors:
y_grid: A float tensor with shape [height, width] representing the
y-coordinate of each pixel grid.
x_grid: A float tensor with shape [height, width] representing the
x-coordinate of each pixel grid.
"""
out_height = tf.cast(height, tf.float32)
out_width = tf.cast(width, tf.float32)
x_range = tf.range(out_width, dtype=tf.float32)
y_range = tf.range(out_height, dtype=tf.float32)
x_grid, y_grid = tf.meshgrid(x_range, y_range, indexing='xy')
return (y_grid, x_grid)
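For example (matching the unit test in target_assigner_utils_test.py), a height-2, width-3 image yields:

y_grid, x_grid = image_shape_to_grids(height=2, width=3)
# y_grid -> [[0., 0., 0.],
#            [1., 1., 1.]]
# x_grid -> [[0., 1., 2.],
#            [0., 1., 2.]]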
def coordinates_to_heatmap(y_grid,
x_grid,
y_coordinates,
x_coordinates,
sigma,
channel_onehot,
channel_weights=None):
"""Returns the heatmap targets from a set of point coordinates.
This function maps a set of point coordinates to the output heatmap image
applied using a Gaussian kernel. Note that this function can be used by both
object detection and keypoint estimation tasks. For object detection, the
"channel" refers to the object class. For keypoint estimation, the "channel"
refers to the number of keypoint types.
Args:
y_grid: A 2D tensor with shape [height, width] which contains the grid
y-coordinates given in the (output) image dimensions.
x_grid: A 2D tensor with shape [height, width] which contains the grid
x-coordinates given in the (output) image dimensions.
y_coordinates: A 1D tensor with shape [num_instances] representing the
y-coordinates of the instances in the output space coordinates.
x_coordinates: A 1D tensor with shape [num_instances] representing the
x-coordinates of the instances in the output space coordinates.
sigma: A 1D tensor with shape [num_instances] representing the standard
deviation of the Gaussian kernel to be applied to the point.
channel_onehot: A 2D tensor with shape [num_instances, num_channels]
representing the one-hot encoded channel labels for each point.
channel_weights: A 1D tensor with shape [num_instances] corresponding to the
weight of each instance.
Returns:
heatmap: A tensor of size [height, width, num_channels] representing the
heatmap. Output (height, width) match the dimensions of the input grids.
"""
num_instances, num_channels = (
shape_utils.combined_static_and_dynamic_shape(channel_onehot))
x_grid = tf.expand_dims(x_grid, 2)
y_grid = tf.expand_dims(y_grid, 2)
# The raw center coordinates in the output space.
x_diff = x_grid - tf.math.floor(x_coordinates)
y_diff = y_grid - tf.math.floor(y_coordinates)
squared_distance = x_diff**2 + y_diff**2
gaussian_map = tf.exp(-squared_distance / (2 * sigma * sigma))
reshaped_gaussian_map = tf.expand_dims(gaussian_map, axis=-1)
reshaped_channel_onehot = tf.reshape(channel_onehot,
(1, 1, num_instances, num_channels))
gaussian_per_box_per_class_map = (
reshaped_gaussian_map * reshaped_channel_onehot)
if channel_weights is not None:
reshaped_weights = tf.reshape(channel_weights, (1, 1, num_instances, 1))
gaussian_per_box_per_class_map *= reshaped_weights
# Take maximum along the "instance" dimension so that all per-instance
# heatmaps of the same class are merged together.
heatmap = tf.reduce_max(gaussian_per_box_per_class_map, axis=2)
# Maximum of an empty tensor is -inf, the following is to avoid that.
heatmap = tf.maximum(heatmap, 0)
return heatmap
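A concrete sketch (mirroring the unit test below): each instance contributes a Gaussian centered at its floored coordinates in the channel selected by its one-hot row, and per-instance maps of the same channel are merged with an element-wise max.

y_grid, x_grid = image_shape_to_grids(height=3, width=5)
heatmap = coordinates_to_heatmap(
    y_grid, x_grid,
    y_coordinates=tf.constant([1.5, 0.5]),
    x_coordinates=tf.constant([2.5, 4.5]),
    sigma=tf.constant([0.1, 0.5]),
    channel_onehot=tf.constant([[1., 0., 0.], [0., 1., 0.]]))
# heatmap has shape [3, 5, 3]; it peaks at (1, 2) in channel 0 and at (0, 4)
# in channel 1.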
def compute_floor_offsets_with_indices(y_source,
x_source,
y_target=None,
x_target=None):
"""Computes offsets from floored source(floored) to target coordinates.
This function computes the offsets from source coordinates ("floored" as if
they were put on the grids) to target coordinates. Note that the input
coordinates should be the "absolute" coordinates in terms of the output image
dimensions as opposed to the normalized coordinates (i.e. values in [0, 1]).
Args:
y_source: A tensor with shape [num_points] representing the absolute
y-coordinates (in the output image space) of the source points.
x_source: A tensor with shape [num_points] representing the absolute
x-coordinates (in the output image space) of the source points.
y_target: A tensor with shape [num_points] representing the absolute
y-coordinates (in the output image space) of the target points. If not
provided, then y_source is used as the targets.
x_target: A tensor with shape [num_points] representing the absolute
x-coordinates (in the output image space) of the target points. If not
provided, then x_source is used as the targets.
Returns:
A tuple of two tensors:
offsets: A tensor with shape [num_points, 2] representing the offsets of
each input point.
indices: A tensor with shape [num_points, 2] representing the indices of
where the offsets should be retrieved in the output image dimension
space.
"""
y_source_floored = tf.floor(y_source)
x_source_floored = tf.floor(x_source)
if y_target is None:
y_target = y_source
if x_target is None:
x_target = x_source
y_offset = y_target - y_source_floored
x_offset = x_target - x_source_floored
y_source_indices = tf.cast(y_source_floored, tf.int32)
x_source_indices = tf.cast(x_source_floored, tf.int32)
indices = tf.stack([y_source_indices, x_source_indices], axis=1)
offsets = tf.stack([y_offset, x_offset], axis=1)
return offsets, indices
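For example (matching the unit test below), offsets are measured from the floored source coordinates, and the floored coordinates themselves become the int32 indices:

offsets, indices = compute_floor_offsets_with_indices(
    y_source=tf.constant([1.5, 0.3]), x_source=tf.constant([2.5, 4.2]))
# offsets -> [[0.5, 0.5], [0.3, 0.2]] (approximately)
# indices -> [[1, 2], [0, 4]]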
def get_valid_keypoint_mask_for_class(keypoint_coordinates,
class_id,
class_onehot,
class_weights=None,
keypoint_indices=None):
"""Mask keypoints by their class ids and indices.
For a given task, we may want to only consider a subset of instances or
keypoints. This function is used to provide the mask (in terms of weights) to
mark those elements which should be considered based on the classes of the
instances and optionally, their keypoint indices. Note that the NaN values
in the keypoints will also be masked out.
Args:
keypoint_coordinates: A float tensor with shape [num_instances,
num_keypoints, 2] which contains the coordinates of each keypoint.
class_id: An integer representing the target class id to be selected.
class_onehot: A 2D tensor of shape [num_instances, num_classes] representing
the one-hot (or k-hot) encoding of the class for each instance.
class_weights: A 1D tensor of shape [num_instances] representing the weight of
each instance. If not provided, all instances are weighted equally.
keypoint_indices: A list of integers representing the keypoint indices used
to select the values on the keypoint dimension. If provided, the output
dimension will be [num_instances, len(keypoint_indices)]
Returns:
A tuple of tensors:
mask: A float tensor of shape [num_instances, K], where K is num_keypoints
or len(keypoint_indices) if provided. The tensor has values either 0 or
1 indicating whether an element in the input keypoints should be used.
keypoints_nan_to_zeros: Same as input keypoints with the NaN values
replaced by zeros and selected columns corresponding to the
keypoint_indices (if provided). The shape of this tensor will always be
the same as the output mask.
"""
num_keypoints = tf.shape(keypoint_coordinates)[1]
class_mask = class_onehot[:, class_id]
reshaped_class_mask = tf.tile(
tf.expand_dims(class_mask, axis=-1), multiples=[1, num_keypoints])
not_nan = tf.math.logical_not(tf.math.is_nan(keypoint_coordinates))
mask = reshaped_class_mask * tf.cast(not_nan[:, :, 0], dtype=tf.float32)
keypoints_nan_to_zeros = tf.where(not_nan, keypoint_coordinates,
tf.zeros_like(keypoint_coordinates))
if class_weights is not None:
reshaped_class_weight = tf.tile(
tf.expand_dims(class_weights, axis=-1), multiples=[1, num_keypoints])
mask = mask * reshaped_class_weight
if keypoint_indices is not None:
mask = tf.gather(mask, indices=keypoint_indices, axis=1)
keypoints_nan_to_zeros = tf.gather(
keypoints_nan_to_zeros, indices=keypoint_indices, axis=1)
return mask, keypoints_nan_to_zeros
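A small illustrative sketch with hypothetical values: a mask entry is 1 only when the instance belongs to class_id and the corresponding keypoint is not NaN (and, if class_weights is given, the instance weight is non-zero).

class_onehot = tf.constant([[0., 0., 1.], [0., 1., 0.], [0., 0., 1.]])
keypoints = tf.constant([[[0.1, 0.1], [float('nan'), 0.2]],
                         [[0.3, 0.3], [0.4, 0.4]],
                         [[0.5, 0.5], [0.6, 0.6]]])  # [3 instances, 2 keypoints, 2]
mask, keypoints_no_nan = get_valid_keypoint_mask_for_class(
    keypoint_coordinates=keypoints, class_id=2, class_onehot=class_onehot)
# mask -> [[1., 0.], [0., 0.], [1., 1.]]: instance 1 is not class 2, and the
# NaN keypoint of instance 0 is masked out. keypoints_no_nan is the input
# with NaNs replaced by zeros.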
def blackout_pixel_weights_by_box_regions(height, width, boxes, blackout):
"""Blackout the pixel weights in the target box regions.
This function is used to generate the pixel weight mask (usually in the output
image dimension). The mask is used to ignore some regions when computing the loss.
Args:
height: int, height of the (output) image.
width: int, width of the (output) image.
boxes: A float tensor with shape [num_instances, 4] indicating the
coordinates of the four corners of the boxes.
blackout: A boolean tensor with shape [num_instances] indicating whether to
blackout (zero-out) the weights within the box regions.
Returns:
A float tensor with shape [height, width] where all values within the
regions of the blackout boxes are 0.0 and 1.0 elsewhere.
"""
(y_grid, x_grid) = image_shape_to_grids(height, width)
y_grid = tf.expand_dims(y_grid, axis=0)
x_grid = tf.expand_dims(x_grid, axis=0)
y_min = tf.expand_dims(boxes[:, 0:1], axis=-1)
x_min = tf.expand_dims(boxes[:, 1:2], axis=-1)
y_max = tf.expand_dims(boxes[:, 2:3], axis=-1)
x_max = tf.expand_dims(boxes[:, 3:], axis=-1)
# Make the mask with all 1.0 in the box regions.
# Shape: [num_instances, height, width]
in_boxes = tf.cast(
tf.logical_and(
tf.logical_and(y_grid >= y_min, y_grid <= y_max),
tf.logical_and(x_grid >= x_min, x_grid <= x_max)),
dtype=tf.float32)
# Shape: [num_instances, height, width]
blackout = tf.tile(
tf.expand_dims(tf.expand_dims(blackout, axis=-1), axis=-1),
[1, height, width])
# Select only the boxes specified by blackout.
selected_in_boxes = tf.where(blackout, in_boxes, tf.zeros_like(in_boxes))
out_boxes = tf.reduce_max(selected_in_boxes, axis=0)
out_boxes = tf.ones_like(out_boxes) - out_boxes
return out_boxes
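For example (hypothetical values, consistent with the unit test in target_assigner_utils_test.py), only boxes whose blackout flag is True zero out their region of the weight map:

boxes = tf.constant([[0., 0., 5., 5.], [6., 12., 8., 18.]])
blackout = tf.constant([True, False])
weights = blackout_pixel_weights_by_box_regions(
    height=10, width=20, boxes=boxes, blackout=blackout)
# weights has shape [10, 20]; it is 0.0 inside the first box (rows 0..5 and
# cols 0..5, inclusive) and 1.0 everywhere else, since only that box has
# blackout=True.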
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils.target_assigner_utils."""
import numpy as np
import tensorflow as tf
from object_detection.utils import target_assigner_utils as ta_utils
from object_detection.utils import test_case
class TargetUtilTest(test_case.TestCase):
def test_image_shape_to_grids(self):
def graph_fn():
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height=2, width=3)
return y_grid, x_grid
expected_y_grid = np.array([[0, 0, 0], [1, 1, 1]])
expected_x_grid = np.array([[0, 1, 2], [0, 1, 2]])
y_grid, x_grid = self.execute(graph_fn, [])
np.testing.assert_array_equal(y_grid, expected_y_grid)
np.testing.assert_array_equal(x_grid, expected_x_grid)
def test_coordinates_to_heatmap(self):
def graph_fn():
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height=3, width=5)
y_coordinates = tf.constant([1.5, 0.5], dtype=tf.float32)
x_coordinates = tf.constant([2.5, 4.5], dtype=tf.float32)
sigma = tf.constant([0.1, 0.5], dtype=tf.float32)
channel_onehot = tf.constant([[1, 0, 0], [0, 1, 0]], dtype=tf.float32)
channel_weights = tf.constant([1, 1], dtype=tf.float32)
heatmap = ta_utils.coordinates_to_heatmap(y_grid, x_grid, y_coordinates,
x_coordinates, sigma,
channel_onehot, channel_weights)
return heatmap
heatmap = self.execute(graph_fn, [])
# Peak at (1, 2) for the first class.
self.assertAlmostEqual(1.0, heatmap[1, 2, 0])
# Peak at (0, 4) for the second class.
self.assertAlmostEqual(1.0, heatmap[0, 4, 1])
def test_compute_floor_offsets_with_indices_onlysource(self):
def graph_fn():
y_source = tf.constant([1.5, 0.3], dtype=tf.float32)
x_source = tf.constant([2.5, 4.2], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
np.testing.assert_array_almost_equal(offsets,
np.array([[0.5, 0.5], [0.3, 0.2]]))
np.testing.assert_array_almost_equal(indices,
np.array([[1, 2], [0, 4]]))
def test_compute_floor_offsets_with_indices_and_targets(self):
def graph_fn():
y_source = tf.constant([1.5, 0.3], dtype=tf.float32)
x_source = tf.constant([2.5, 4.2], dtype=tf.float32)
y_target = tf.constant([2.1, 0.1], dtype=tf.float32)
x_target = tf.constant([1.2, 4.5], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source, y_target, x_target)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
np.testing.assert_array_almost_equal(offsets,
np.array([[1.1, -0.8], [0.1, 0.5]]))
np.testing.assert_array_almost_equal(indices,
np.array([[1, 2], [0, 4]]))
def test_get_valid_keypoints_mask(self):
def graph_fn():
class_onehot = tf.constant(
[[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1]], dtype=tf.float32)
keypoints = tf.constant(
[[0.1, float('nan'), 0.2, 0.0],
[0.0, 0.0, 0.1, 0.9],
[3.2, 4.3, float('nan'), 0.2]],
dtype=tf.float32)
keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoint_coordinates,
class_id=2,
class_onehot=class_onehot,
keypoint_indices=[1, 2])
return mask, keypoints_nan_to_zeros
keypoints = np.array([[0.0, 0.2],
[0.0, 0.1],
[4.3, 0.0]])
expected_mask = np.array([[0, 1], [0, 0], [1, 0]])
expected_keypoints = np.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])
np.testing.assert_array_equal(mask, expected_mask)
np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,
expected_keypoints)
def test_get_valid_keypoints_with_mask(self):
def graph_fn():
class_onehot = tf.constant(
[[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1]], dtype=tf.float32)
keypoints = tf.constant(
[[0.1, float('nan'), 0.2, 0.0],
[0.0, 0.0, 0.1, 0.9],
[3.2, 4.3, float('nan'), 0.2]],
dtype=tf.float32)
keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)
weights = tf.constant([0.0, 0.0, 1.0])
mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoint_coordinates,
class_id=2,
class_onehot=class_onehot,
class_weights=weights,
keypoint_indices=[1, 2])
return mask, keypoints_nan_to_zeros
expected_mask = np.array([[0, 0], [0, 0], [1, 0]])
keypoints = np.array([[0.0, 0.2],
[0.0, 0.1],
[4.3, 0.0]])
expected_keypoints = np.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])
np.testing.assert_array_equal(mask, expected_mask)
np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,
expected_keypoints)
def test_blackout_pixel_weights_by_box_regions(self):
def graph_fn():
boxes = tf.constant(
[[0.0, 0.0, 5, 5], [0.0, 0.0, 10.0, 20.0], [6.0, 12.0, 8.0, 18.0]],
dtype=tf.float32)
blackout = tf.constant([True, False, True], dtype=tf.bool)
blackout_pixel_weights_by_box_regions = tf.function(
ta_utils.blackout_pixel_weights_by_box_regions)
output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)
return output
output = self.execute(graph_fn, [])
# All zeros in region [0:6, 0:6].
self.assertAlmostEqual(np.sum(output[0:6, 0:6]), 0.0)
    # All zeros in region [6:9, 12:19].
self.assertAlmostEqual(np.sum(output[6:9, 12:19]), 0.0)
# All other pixel weights should be 1.0.
# 20 * 10 - 6 * 6 - 3 * 7 = 143.0
self.assertAlmostEqual(np.sum(output), 143.0)
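  # Illustrative sketch only (not part of the original tests): a weight map
  # like `output` above would typically be multiplied elementwise into a
  # per-pixel loss so that blacked-out box regions contribute nothing, e.g.
  #   per_pixel_loss = tf.random.uniform([10, 20])  # hypothetical loss tensor
  #   masked_loss = tf.reduce_sum(per_pixel_loss * output)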
if __name__ == '__main__':
tf.test.main()
@@ -12,97 +12,261 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A convenience wrapper around tf.test.TestCase to enable TPU tests."""
"""A convenience wrapper around tf.test.TestCase to test with TPU, TF1, TF2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import zip
import tensorflow as tf
from tensorflow.contrib import tpu
from tensorflow.python import tf2 # pylint: disable=import-outside-toplevel
if not tf2.enabled():
from tensorflow.contrib import tpu as contrib_tpu # pylint: disable=g-import-not-at-top, line-too-long
flags = tf.app.flags
flags.DEFINE_bool('tpu_test', False, 'Whether to configure test for TPU.')
flags.DEFINE_bool('tpu_test', False, 'Deprecated Flag.')
FLAGS = flags.FLAGS
class TestCase(tf.test.TestCase):
"""Base Test class to handle execution under {TF1.X, TF2.X} x {TPU, CPU}.
This class determines the TF version and availability of TPUs to set up
tests appropriately.
"""
class TestCase(tf.test.TestCase):
"""Extends tf.test.TestCase to optionally allow running tests on TPU."""
def maybe_extract_single_output(self, outputs):
if isinstance(outputs, list) or isinstance(outputs, tuple):
if isinstance(outputs[0], tf.Tensor):
outputs_np = [output.numpy() for output in outputs]
else:
outputs_np = outputs
if len(outputs_np) == 1:
return outputs_np[0]
else:
return outputs_np
else:
if isinstance(outputs, tf.Tensor):
return outputs.numpy()
else:
return outputs
def has_tpu(self):
"""Returns whether there are any logical TPU devices."""
return bool(tf.config.experimental.list_logical_devices(device_type='TPU'))
def execute_tpu(self, graph_fn, inputs):
"""Constructs the graph, executes it on TPU and returns the result.
def is_tf2(self):
"""Returns whether TF2 is enabled."""
return tf2.enabled()
def execute_tpu_tf1(self, compute_fn, inputs):
"""Executes compute_fn on TPU with Tensorflow 1.X.
Args:
graph_fn: a callable that constructs the tensorflow graph to test. The
arguments of this function should correspond to `inputs`.
inputs: a list of numpy arrays to feed input to the computation graph.
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a scalar returned from executing the tensorflow
graph.
A list of numpy arrays or a single numpy array.
"""
with self.test_session(graph=tf.Graph()) as sess:
placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
tpu_computation = tpu.rewrite(graph_fn, placeholders)
sess.run(tpu.initialize_system())
def wrap_graph_fn(*args, **kwargs):
results = compute_fn(*args, **kwargs)
if (not (isinstance(results, dict) or isinstance(results, tf.Tensor))
and hasattr(results, '__iter__')):
results = list(results)
return results
tpu_computation = contrib_tpu.rewrite(wrap_graph_fn, placeholders)
sess.run(contrib_tpu.initialize_system())
sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
tf.local_variables_initializer()])
materialized_results = sess.run(tpu_computation,
feed_dict=dict(zip(placeholders, inputs)))
sess.run(tpu.shutdown_system())
if (hasattr(materialized_results, '__len__') and
len(materialized_results) == 1 and
(isinstance(materialized_results, list) or
isinstance(materialized_results, tuple))):
materialized_results = materialized_results[0]
return materialized_results
sess.run(contrib_tpu.shutdown_system())
return self.maybe_extract_single_output(materialized_results)
def execute_cpu(self, graph_fn, inputs):
"""Constructs the graph, executes it on CPU and returns the result.
def execute_tpu_tf2(self, compute_fn, inputs):
"""Executes compute_fn on TPU with Tensorflow 2.X.
Args:
graph_fn: a callable that constructs the tensorflow graph to test. The
arguments of this function should correspond to `inputs`.
inputs: a list of numpy arrays to feed input to the computation graph.
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a scalar returned from executing the tensorflow
graph.
A list of numpy arrays or a single numpy array.
"""
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology, num_replicas=1)
strategy = tf.distribute.experimental.TPUStrategy(
resolver, device_assignment=device_assignment)
@tf.function
def run():
tf_inputs = [tf.constant(input_t) for input_t in inputs]
return strategy.run(compute_fn, args=tf_inputs)
outputs = run()
tf.tpu.experimental.shutdown_tpu_system()
return self.maybe_extract_single_output(outputs)
def execute_cpu_tf1(self, compute_fn, inputs):
"""Executes compute_fn on CPU with Tensorflow 1.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single numpy array.
"""
if self.is_tf2():
      raise ValueError('Required version TensorFlow 1.X is not available.')
with self.test_session(graph=tf.Graph()) as sess:
placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
results = graph_fn(*placeholders)
results = compute_fn(*placeholders)
if (not (isinstance(results, dict) or isinstance(results, tf.Tensor)) and
hasattr(results, '__iter__')):
results = list(results)
sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
tf.local_variables_initializer()])
materialized_results = sess.run(results, feed_dict=dict(zip(placeholders,
inputs)))
return self.maybe_extract_single_output(materialized_results)
def execute_cpu_tf2(self, compute_fn, inputs):
"""Executes compute_fn on CPU with Tensorflow 2.X.
if (hasattr(materialized_results, '__len__') and
len(materialized_results) == 1 and
(isinstance(materialized_results, list) or
isinstance(materialized_results, tuple))):
materialized_results = materialized_results[0]
return materialized_results
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single numpy array.
"""
if not self.is_tf2():
raise ValueError('Required version TensorFlow 2.0 is not available.')
@tf.function
def run():
tf_inputs = [tf.constant(input_t) for input_t in inputs]
return compute_fn(*tf_inputs)
return self.maybe_extract_single_output(run())
def execute_cpu(self, compute_fn, inputs):
"""Executes compute_fn on CPU.
Depending on the underlying TensorFlow installation (build deps) runs in
either TF 1.X or TF 2.X style.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single tensor.
"""
if self.is_tf2():
return self.execute_cpu_tf2(compute_fn, inputs)
else:
return self.execute_cpu_tf1(compute_fn, inputs)
def execute_tpu(self, compute_fn, inputs):
"""Executes compute_fn on TPU.
Depending on the underlying TensorFlow installation (build deps) runs in
either TF 1.X or TF 2.X style.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single tensor.
"""
if not self.has_tpu():
raise ValueError('No TPU Device found.')
if self.is_tf2():
return self.execute_tpu_tf2(compute_fn, inputs)
else:
return self.execute_tpu_tf1(compute_fn, inputs)
def execute_tf2(self, compute_fn, inputs):
"""Runs compute_fn with TensorFlow 2.0.
Executes on TPU if available, otherwise executes on CPU.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single tensor.
"""
if not self.is_tf2():
raise ValueError('Required version TensorFlow 2.0 is not available.')
if self.has_tpu():
return self.execute_tpu_tf2(compute_fn, inputs)
else:
return self.execute_cpu_tf2(compute_fn, inputs)
def execute_tf1(self, compute_fn, inputs):
"""Runs compute_fn with TensorFlow 1.X.
Executes on TPU if available, otherwise executes on CPU.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single tensor.
"""
if self.is_tf2():
      raise ValueError('Required version TensorFlow 1.X is not available.')
if self.has_tpu():
return self.execute_tpu_tf1(compute_fn, inputs)
else:
return self.execute_cpu_tf1(compute_fn, inputs)
def execute(self, graph_fn, inputs):
"""Constructs the graph, creates a test session and returns the results.
def execute(self, compute_fn, inputs):
"""Runs compute_fn with inputs and returns results.
The graph is executed either on TPU or CPU based on the `tpu_test` flag.
* Executes in either TF1.X or TF2.X style based on the TensorFlow version.
* Executes on TPU if available, otherwise executes on CPU.
Args:
graph_fn: a callable that constructs the tensorflow graph to test. The
arguments of this function should correspond to `inputs`.
inputs: a list of numpy arrays to feed input to the computation graph.
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a scalar returned from executing the tensorflow
graph.
A list of numpy arrays or a single tensor.
"""
if FLAGS.tpu_test:
return self.execute_tpu(graph_fn, inputs)
if self.has_tpu() and tf2.enabled():
return self.execute_tpu_tf2(compute_fn, inputs)
elif not self.has_tpu() and tf2.enabled():
return self.execute_cpu_tf2(compute_fn, inputs)
elif self.has_tpu() and not tf2.enabled():
return self.execute_tpu_tf1(compute_fn, inputs)
else:
return self.execute_cpu(graph_fn, inputs)
return self.execute_cpu_tf1(compute_fn, inputs)
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.third_party.tensorflow_models.object_detection.utils.test_case."""
import numpy as np
import tensorflow as tf
from object_detection.utils import test_case
class TestCaseTest(test_case.TestCase):
def test_simple(self):
def graph_fn(tensora, tensorb):
return tf.tensordot(tensora, tensorb, axes=1)
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output, 40.0)
def test_two_outputs(self):
def graph_fn(tensora, tensorb):
return tensora + tensorb, tensora - tensorb
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output[0], tensora_np + tensorb_np)
self.assertAllClose(output[1], tensora_np - tensorb_np)
def test_function_with_tf_assert(self):
def compute_fn(image):
return tf.image.pad_to_bounding_box(image, 0, 0, 40, 40)
image_np = np.random.rand(2, 20, 30, 3)
output = self.execute(compute_fn, [image_np])
self.assertAllEqual(output.shape, [2, 40, 40, 3])
def test_tf2_only_test(self):
"""Set up tests only to run with TF2."""
if self.is_tf2():
def graph_fn(tensora, tensorb):
return tensora + tensorb, tensora - tensorb
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute_tf2(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output[0], tensora_np + tensorb_np)
self.assertAllClose(output[1], tensora_np - tensorb_np)
def test_tpu_only_test(self):
"""Set up tests only to run with TPU."""
if self.has_tpu():
def graph_fn(tensora, tensorb):
return tensora + tensorb, tensora - tensorb
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute_tpu(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output[0], tensora_np + tensorb_np)
self.assertAllClose(output[1], tensora_np - tensorb_np)
if __name__ == '__main__':
tf.test.main()
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to check TensorFlow Version."""
from tensorflow.python import tf2 # pylint: disable=import-outside-toplevel
def is_tf1():
"""Whether current TensorFlow Version is 1.X."""
return not tf2.enabled()
def is_tf2():
"""Whether current TensorFlow Version is 2.X."""
return tf2.enabled()
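# Example usage (a minimal sketch, not part of this module): these helpers can
# be used to guard version-specific tests or code paths, e.g.
#
#   from object_detection.utils import tf_version
#
#   if tf_version.is_tf1():
#     self.skipTest('Skipping test that requires TF2.')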
@@ -25,10 +25,9 @@ import re
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.python.ops import variables as tf_variables
slim = tf.contrib.slim
# TODO(derekjchow): Consider replacing with tf.contrib.filter_variables in
# tensorflow/contrib/framework/python/ops/variables.py