Unverified Commit 420a7253 authored by pkulzc, committed by GitHub

Refactor tests for Object Detection API. (#8688)

Internal changes

--

PiperOrigin-RevId: 316837667
parent d0ef3913
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing ResNet v1 FPN models for the CenterNet meta architecture."""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf

from object_detection.models import center_net_resnet_v1_fpn_feature_extractor
from object_detection.utils import test_case
from object_detection.utils import tf_version


@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetResnetV1FpnFeatureExtractorTest(test_case.TestCase,
                                               parameterized.TestCase):

  @parameterized.parameters(
      {'resnet_type': 'resnet_v1_50'},
      {'resnet_type': 'resnet_v1_101'},
  )
  def test_correct_output_size(self, resnet_type):
    """Verify that the shape of features returned by the backbone is correct."""
    model = center_net_resnet_v1_fpn_feature_extractor.\
        CenterNetResnetV1FpnFeatureExtractor(resnet_type)

    def graph_fn():
      img = np.zeros((8, 224, 224, 3), dtype=np.float32)
      processed_img = model.preprocess(img)
      return model(processed_img)

    self.assertEqual(self.execute(graph_fn, []).shape, (8, 56, 56, 64))


if __name__ == '__main__':
  tf.test.main()
@@ -14,13 +14,16 @@
 # ==============================================================================
 """Tests for embedded_ssd_mobilenet_v1_feature_extractor."""
+import unittest
 import numpy as np
 import tensorflow.compat.v1 as tf
 from object_detection.models import embedded_ssd_mobilenet_v1_feature_extractor
 from object_detection.models import ssd_feature_extractor_test
+from object_detection.utils import tf_version
+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class EmbeddedSSDMobileNetV1FeatureExtractorTest(
     ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
...
@@ -14,12 +14,14 @@
 # ==============================================================================
 """Tests for models.faster_rcnn_inception_resnet_v2_feature_extractor."""
+import unittest
 import tensorflow.compat.v1 as tf
 from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
+from object_detection.utils import tf_version
+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnInceptionResnetV2FeatureExtractorTest(tf.test.TestCase):
   def _build_feature_extractor(self, first_stage_features_stride):
...
@@ -14,12 +14,14 @@
 # ==============================================================================
 """Tests for models.faster_rcnn_inception_resnet_v2_keras_feature_extractor."""
+import unittest
 import tensorflow.compat.v1 as tf
 from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res
+from object_detection.utils import tf_version
+@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
 class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
   def _build_feature_extractor(self, first_stage_features_stride):
@@ -38,11 +40,7 @@ class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
         name='TestScope')(preprocessed_inputs)
     features_shape = tf.shape(rpn_feature_map)
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      features_shape_out = sess.run(features_shape)
-      self.assertAllEqual(features_shape_out, [1, 19, 19, 1088])
+    self.assertAllEqual(features_shape.numpy(), [1, 19, 19, 1088])

   def test_extract_proposal_features_stride_eight(self):
     feature_extractor = self._build_feature_extractor(
@@ -53,11 +51,7 @@ class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
         name='TestScope')(preprocessed_inputs)
     features_shape = tf.shape(rpn_feature_map)
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      features_shape_out = sess.run(features_shape)
-      self.assertAllEqual(features_shape_out, [1, 28, 28, 1088])
+    self.assertAllEqual(features_shape.numpy(), [1, 28, 28, 1088])

   def test_extract_proposal_features_half_size_input(self):
     feature_extractor = self._build_feature_extractor(
@@ -67,25 +61,7 @@ class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
     rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
         name='TestScope')(preprocessed_inputs)
     features_shape = tf.shape(rpn_feature_map)
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      features_shape_out = sess.run(features_shape)
-      self.assertAllEqual(features_shape_out, [1, 7, 7, 1088])
-
-  def test_extract_proposal_features_dies_on_invalid_stride(self):
-    with self.assertRaises(ValueError):
-      self._build_feature_extractor(first_stage_features_stride=99)
-
-  def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
-    feature_extractor = self._build_feature_extractor(
-        first_stage_features_stride=16)
-    preprocessed_inputs = tf.random_uniform(
-        [224, 224, 3], maxval=255, dtype=tf.float32)
-    with self.assertRaises(ValueError):
-      feature_extractor.get_proposal_feature_extractor_model(
-          name='TestScope')(preprocessed_inputs)
+    self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1088])

   def test_extract_box_classifier_features_returns_expected_size(self):
     feature_extractor = self._build_feature_extractor(
@@ -97,12 +73,7 @@ class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
     proposal_classifier_features = (
         model(proposal_feature_maps))
     features_shape = tf.shape(proposal_classifier_features)
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      features_shape_out = sess.run(features_shape)
-      self.assertAllEqual(features_shape_out, [2, 8, 8, 1536])
+    self.assertAllEqual(features_shape.numpy(), [2, 8, 8, 1536])

 if __name__ == '__main__':
...
@@ -14,13 +14,15 @@
 # ==============================================================================
 """Tests for faster_rcnn_inception_v2_feature_extractor."""
+import unittest
 import numpy as np
 import tensorflow.compat.v1 as tf
 from object_detection.models import faster_rcnn_inception_v2_feature_extractor as faster_rcnn_inception_v2
+from object_detection.utils import tf_version
+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnInceptionV2FeatureExtractorTest(tf.test.TestCase):
   def _build_feature_extractor(self, first_stage_features_stride):
...
@@ -14,13 +14,15 @@
 # ==============================================================================
 """Tests for faster_rcnn_mobilenet_v1_feature_extractor."""
+import unittest
 import numpy as np
 import tensorflow.compat.v1 as tf
 from object_detection.models import faster_rcnn_mobilenet_v1_feature_extractor as faster_rcnn_mobilenet_v1
+from object_detection.utils import tf_version
+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnMobilenetV1FeatureExtractorTest(tf.test.TestCase):
   def _build_feature_extractor(self, first_stage_features_stride):
...
@@ -31,8 +31,14 @@ import tf_slim as slim
 from object_detection.meta_architectures import faster_rcnn_meta_arch
 from object_detection.utils import variables_helper
-from nets.nasnet import nasnet
-from nets.nasnet import nasnet_utils
+# pylint: disable=g-import-not-at-top
+try:
+  from nets.nasnet import nasnet
+  from nets.nasnet import nasnet_utils
+except:  # pylint: disable=bare-except
+  pass
+# pylint: enable=g-import-not-at-top
 arg_scope = slim.arg_scope
...
@@ -14,12 +14,14 @@
 # ==============================================================================
 """Tests for models.faster_rcnn_nas_feature_extractor."""
+import unittest
 import tensorflow.compat.v1 as tf
 from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas
+from object_detection.utils import tf_version
+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnNASFeatureExtractorTest(tf.test.TestCase):
   def _build_feature_extractor(self, first_stage_features_stride):
...
@@ -30,7 +30,11 @@ import tf_slim as slim
 from object_detection.meta_architectures import faster_rcnn_meta_arch
 from object_detection.utils import variables_helper
 from nets.nasnet import nasnet_utils
-from nets.nasnet import pnasnet
+try:
+  from nets.nasnet import pnasnet  # pylint: disable=g-import-not-at-top
+except:  # pylint: disable=bare-except
+  pass
 arg_scope = slim.arg_scope
...
@@ -14,12 +14,14 @@
 # ==============================================================================
 """Tests for models.faster_rcnn_pnas_feature_extractor."""
+import unittest
 import tensorflow.compat.v1 as tf
 from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas
+from object_detection.utils import tf_version
+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnPNASFeatureExtractorTest(tf.test.TestCase):
   def _build_feature_extractor(self, first_stage_features_stride):
...
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet based Faster R-CNN implementation in Keras.
See Deep Residual Learning for Image Recognition by He et al.
https://arxiv.org/abs/1512.03385
"""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.models.keras_models import resnet_v1
from object_detection.utils import model_util
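# Maps each supported ResNet v1 variant to the Keras layer name that ends its
# conv4 block. The backbone is split at this layer: everything up to it feeds
# the first stage RPN, while the remaining conv5 block is reused by the second
# stage box classifier (see the two get_*_model methods below).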
_RESNET_MODEL_CONV4_LAST_LAYERS = {
'resnet_v1_50': 'conv4_block6_out',
'resnet_v1_101': 'conv4_block23_out',
'resnet_v1_152': 'conv4_block36_out',
}
class FasterRCNNResnetKerasFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Faster R-CNN with Resnet feature extractor implementation."""
def __init__(self,
is_training,
resnet_v1_base_model,
resnet_v1_base_model_name,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
resnet_v1_base_model: base resnet v1 network to use. One of
the resnet_v1.resnet_v1_{50,101,152} models.
resnet_v1_base_model_name: model name under which to construct resnet v1.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
Raises:
      ValueError: If `first_stage_features_stride` is not 16.
"""
if first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 16.')
super(FasterRCNNResnetKerasFeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
weight_decay)
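    # The Keras ResNet v1 classification backbone is built lazily the first
    # time either feature extractor model is requested, and is then shared by
    # both stages so the RPN and box classifier reuse the same variables.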
self.classification_backbone = None
self._variable_dict = {}
self._resnet_v1_base_model = resnet_v1_base_model
self._resnet_v1_base_model_name = resnet_v1_base_model_name
def preprocess(self, resized_inputs):
"""Faster R-CNN Resnet V1 preprocessing.
VGG style channel mean subtraction as described here:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def get_proposal_feature_extractor_model(self, name=None):
"""Returns a model that extracts first stage RPN features.
Extracts features using the first half of the Resnet v1 network.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes preprocessed_inputs:
A [batch, height, width, channels] float32 tensor
representing a batch of images.
And returns rpn_feature_map:
A tensor with shape [batch, height, width, depth]
"""
if not self.classification_backbone:
self.classification_backbone = self._resnet_v1_base_model(
batchnorm_training=self._train_batch_norm,
conv_hyperparams=None,
weight_decay=self._weight_decay,
classes=None,
weights=None,
include_top=False
)
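    # Expose everything up to the end of the conv4 block as a standalone Keras
    # model; its output is the RPN feature map at the stride-16 resolution
    # enforced by the constructor.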
with tf.name_scope(name):
with tf.name_scope('ResnetV1'):
conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[
self._resnet_v1_base_model_name]
proposal_features = self.classification_backbone.get_layer(
name=conv4_last_layer).output
keras_model = tf.keras.Model(
inputs=self.classification_backbone.inputs,
outputs=proposal_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
def get_box_classifier_feature_extractor_model(self, name=None):
"""Returns a model that extracts second stage box classifier features.
This function reconstructs the "second half" of the ResNet v1
network after the part defined in `get_proposal_feature_extractor_model`.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes proposal_feature_maps:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
And returns proposal_classifier_features:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
if not self.classification_backbone:
self.classification_backbone = self._resnet_v1_base_model(
batchnorm_training=self._train_batch_norm,
conv_hyperparams=None,
weight_decay=self._weight_decay,
classes=None,
weights=None,
include_top=False
)
with tf.name_scope(name):
with tf.name_scope('ResnetV1'):
conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[
self._resnet_v1_base_model_name]
proposal_feature_maps = self.classification_backbone.get_layer(
name=conv4_last_layer).output
proposal_classifier_features = self.classification_backbone.get_layer(
name='conv5_block3_out').output
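        # Carve out the conv4 -> conv5 tail of the backbone so the box
        # classifier can run it on the feature crops pooled from each proposal.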
keras_model = model_util.extract_submodel(
model=self.classification_backbone,
inputs=proposal_feature_maps,
outputs=proposal_classifier_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
def restore_from_classification_checkpoint_fn(
self,
first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope):
"""Returns a map for restoring from an (object-based) checkpoint.
Args:
first_stage_feature_extractor_scope: A scope name for the first stage
feature extractor (unused).
second_stage_feature_extractor_scope: A scope name for the second stage
feature extractor (unused).
Returns:
A dict mapping keys to Keras models
"""
return {'feature_extractor': self.classification_backbone}
class FasterRCNNResnet50KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet50 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_50,
resnet_v1_base_model_name='resnet_v1_50',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
class FasterRCNNResnet101KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet101 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet101KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_101,
resnet_v1_base_model_name='resnet_v1_101',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
class FasterRCNNResnet152KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet152 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet152KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_152,
resnet_v1_base_model_name='resnet_v1_152',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_resnet_keras_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_res
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class FasterRcnnResnetKerasFeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, architecture='resnet_v1_50'):
return frcnn_res.FasterRCNNResnet50KerasFeatureExtractor(
is_training=False,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 14, 14, 1024])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1024])
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(tf.errors.InvalidArgumentError):
feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor()
proposal_feature_maps = tf.random_uniform(
[3, 7, 7, 1024], maxval=255, dtype=tf.float32)
model = feature_extractor.get_box_classifier_feature_extractor_model(
name='TestScope')
proposal_classifier_features = (
model(proposal_feature_maps))
features_shape = tf.shape(proposal_classifier_features)
    # Note: due to a slight mismatch between the slim and Keras ResNet
    # definitions, the output shape of this box classifier differs from that
    # of the slim implementation. The Keras version is more `canonical` in
    # that it more closely follows the original authors' implementation.
    # TODO(jonathanhuang): make the output shape match that of the slim
    # implementation by using atrous convolutions.
self.assertAllEqual(features_shape.numpy(), [3, 4, 4, 2048])
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
@@ -14,13 +14,15 @@
 # ==============================================================================
 """Tests for object_detection.models.faster_rcnn_resnet_v1_feature_extractor."""
+import unittest
 import numpy as np
 import tensorflow.compat.v1 as tf
 from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as faster_rcnn_resnet_v1
+from object_detection.utils import tf_version
+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
 class FasterRcnnResnetV1FeatureExtractorTest(tf.test.TestCase):
   def _build_feature_extractor(self,
...
@@ -14,7 +14,7 @@
 # ==============================================================================
 """Tests for feature map generators."""
+import unittest
 from absl.testing import parameterized
 import numpy as np
@@ -25,6 +25,9 @@ from google.protobuf import text_format
 from object_detection.builders import hyperparams_builder
 from object_detection.models import feature_map_generators
 from object_detection.protos import hyperparams_pb2
+from object_detection.utils import test_case
+from object_detection.utils import test_utils
+from object_detection.utils import tf_version
 INCEPTION_V2_LAYOUT = {
     'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
@@ -52,11 +55,7 @@ SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT = {
 }

-@parameterized.parameters(
-    {'use_keras': False},
-    {'use_keras': True},
-)
-class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
+class MultiResolutionFeatureMapGeneratorTest(test_case.TestCase):

   def _build_conv_hyperparams(self):
     conv_hyperparams = hyperparams_pb2.Hyperparams()
@@ -73,9 +72,9 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
     text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
     return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
-  def _build_feature_map_generator(self, feature_map_layout, use_keras,
+  def _build_feature_map_generator(self, feature_map_layout,
                                    pool_residual=False):
-    if use_keras:
+    if tf_version.is_tf2():
       return feature_map_generators.KerasMultiResolutionFeatureMaps(
           feature_map_layout=feature_map_layout,
           depth_multiplier=1,
@@ -97,17 +96,18 @@
         pool_residual=pool_residual)
     return feature_map_generator
-  def test_get_expected_feature_map_shapes_with_inception_v2(self, use_keras):
-    image_features = {
-        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
-        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
-        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
-    }
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=INCEPTION_V2_LAYOUT,
-        use_keras=use_keras
-    )
-    feature_maps = feature_map_generator(image_features)
+  def test_get_expected_feature_map_shapes_with_inception_v2(self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
+          'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
+          'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
+      }
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=INCEPTION_V2_LAYOUT)
+    def graph_fn():
+      feature_maps = feature_map_generator(image_features)
+      return feature_maps
     expected_feature_map_shapes = {
         'Mixed_3c': (4, 28, 28, 256),
@@ -116,29 +116,25 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
         'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
         'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

   def test_get_expected_feature_map_shapes_with_inception_v2_use_depthwise(
-      self, use_keras):
-    image_features = {
-        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
-        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
-        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
-    }
-    layout_copy = INCEPTION_V2_LAYOUT.copy()
-    layout_copy['use_depthwise'] = True
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=layout_copy,
-        use_keras=use_keras
-    )
-    feature_maps = feature_map_generator(image_features)
+      self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
+          'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
+          'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
+      }
+      layout_copy = INCEPTION_V2_LAYOUT.copy()
+      layout_copy['use_depthwise'] = True
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=layout_copy)
+    def graph_fn():
+      return feature_map_generator(image_features)
     expected_feature_map_shapes = {
         'Mixed_3c': (4, 28, 28, 256),
@@ -147,29 +143,25 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
         'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
         'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

-  def test_get_expected_feature_map_shapes_use_explicit_padding(
-      self, use_keras):
-    image_features = {
-        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
-        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
-        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
-    }
-    layout_copy = INCEPTION_V2_LAYOUT.copy()
-    layout_copy['use_explicit_padding'] = True
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=layout_copy,
-        use_keras=use_keras
-    )
-    feature_maps = feature_map_generator(image_features)
+  def test_get_expected_feature_map_shapes_use_explicit_padding(self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
+          'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
+          'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
+      }
+      layout_copy = INCEPTION_V2_LAYOUT.copy()
+      layout_copy['use_explicit_padding'] = True
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=layout_copy,
+      )
+    def graph_fn():
+      return feature_map_generator(image_features)
     expected_feature_map_shapes = {
         'Mixed_3c': (4, 28, 28, 256),
@@ -178,27 +170,24 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
         'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
         'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
-
-  def test_get_expected_feature_map_shapes_with_inception_v3(self, use_keras):
-    image_features = {
-        'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
-        'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
-        'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
-    }
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=INCEPTION_V3_LAYOUT,
-        use_keras=use_keras
-    )
-    feature_maps = feature_map_generator(image_features)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+
+  def test_get_expected_feature_map_shapes_with_inception_v3(self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
+          'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
+          'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
+      }
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=INCEPTION_V3_LAYOUT,
+      )
+    def graph_fn():
+      return feature_map_generator(image_features)
     expected_feature_map_shapes = {
         'Mixed_5d': (4, 35, 35, 256),
@@ -207,29 +196,26 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
         'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
         'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

   def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(
-      self, use_keras):
-    image_features = {
-        'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
-                                                 dtype=tf.float32),
-        'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
-                                                 dtype=tf.float32),
-    }
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,
-        use_keras=use_keras
-    )
-    feature_maps = feature_map_generator(image_features)
+      self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
+                                                   dtype=tf.float32),
+          'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
+                                                   dtype=tf.float32),
+      }
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,
+      )
+    def graph_fn():
+      return feature_map_generator(image_features)
     expected_feature_map_shapes = {
         'Conv2d_11_pointwise': (4, 16, 16, 512),
@@ -237,55 +223,50 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512),
         'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256),
         'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

   def test_feature_map_shapes_with_pool_residual_ssd_mobilenet_v1(
-      self, use_keras):
-    image_features = {
-        'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
-                                                 dtype=tf.float32),
-    }
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT,
-        use_keras=use_keras,
-        pool_residual=True
-    )
-    feature_maps = feature_map_generator(image_features)
+      self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
+                                                   dtype=tf.float32),
+      }
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT,
+          pool_residual=True
+      )
+    def graph_fn():
+      return feature_map_generator(image_features)
     expected_feature_map_shapes = {
         'Conv2d_13_pointwise': (4, 8, 8, 1024),
         'Conv2d_13_pointwise_2_Conv2d_1_3x3_s2_256': (4, 4, 4, 256),
         'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_256': (4, 2, 2, 256),
         'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 1, 1, 256)}
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = dict(
-          (key, value.shape) for key, value in out_feature_maps.items())
-      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
-
-  def test_get_expected_variable_names_with_inception_v2(self, use_keras):
-    image_features = {
-        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
-        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
-        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
-    }
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=INCEPTION_V2_LAYOUT,
-        use_keras=use_keras
-    )
-    feature_maps = feature_map_generator(image_features)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
+
+  def test_get_expected_variable_names_with_inception_v2(self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
+          'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
+          'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
+      }
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=INCEPTION_V2_LAYOUT,
+      )
+    def graph_fn():
+      return feature_map_generator(image_features)
+    self.execute(graph_fn, [], g)
     expected_slim_variables = set([
         'Mixed_5c_1_Conv2d_3_1x1_256/weights',
         'Mixed_5c_1_Conv2d_3_1x1_256/biases',
@@ -316,32 +297,32 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',
     ])
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      sess.run(feature_maps)
-      actual_variable_set = set(
-          [var.op.name for var in tf.trainable_variables()])
-      if use_keras:
-        self.assertSetEqual(expected_keras_variables, actual_variable_set)
-      else:
-        self.assertSetEqual(expected_slim_variables, actual_variable_set)
+    if tf_version.is_tf2():
+      actual_variable_set = set(
+          [var.name.split(':')[0] for var in feature_map_generator.variables])
+      self.assertSetEqual(expected_keras_variables, actual_variable_set)
+    else:
+      with g.as_default():
+        actual_variable_set = set(
+            [var.op.name for var in tf.trainable_variables()])
+        self.assertSetEqual(expected_slim_variables, actual_variable_set)

   def test_get_expected_variable_names_with_inception_v2_use_depthwise(
-      self,
-      use_keras):
-    image_features = {
-        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
-        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
-        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
-    }
-    layout_copy = INCEPTION_V2_LAYOUT.copy()
-    layout_copy['use_depthwise'] = True
-    feature_map_generator = self._build_feature_map_generator(
-        feature_map_layout=layout_copy,
-        use_keras=use_keras
-    )
-    feature_maps = feature_map_generator(image_features)
+      self):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = {
+          'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
+          'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
+          'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
+      }
+      layout_copy = INCEPTION_V2_LAYOUT.copy()
+      layout_copy['use_depthwise'] = True
+      feature_map_generator = self._build_feature_map_generator(
+          feature_map_layout=layout_copy,
+      )
+    def graph_fn():
+      return feature_map_generator(image_features)
+    self.execute(graph_fn, [], g)
     expected_slim_variables = set([
         'Mixed_5c_1_Conv2d_3_1x1_256/weights',
@@ -391,23 +372,20 @@ class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
         'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',
     ])
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      sess.run(feature_maps)
-      actual_variable_set = set(
-          [var.op.name for var in tf.trainable_variables()])
-      if use_keras:
-        self.assertSetEqual(expected_keras_variables, actual_variable_set)
-      else:
-        self.assertSetEqual(expected_slim_variables, actual_variable_set)
+    if tf_version.is_tf2():
+      actual_variable_set = set(
+          [var.name.split(':')[0] for var in feature_map_generator.variables])
+      self.assertSetEqual(expected_keras_variables, actual_variable_set)
+    else:
+      with g.as_default():
+        actual_variable_set = set(
+            [var.op.name for var in tf.trainable_variables()])
+        self.assertSetEqual(expected_slim_variables, actual_variable_set)

-@parameterized.parameters({'use_native_resize_op': True, 'use_keras': False},
-                          {'use_native_resize_op': False, 'use_keras': False},
-                          {'use_native_resize_op': True, 'use_keras': True},
-                          {'use_native_resize_op': False, 'use_keras': True})
-class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
+@parameterized.parameters({'use_native_resize_op': True},
+                          {'use_native_resize_op': False})
+class FPNFeatureMapGeneratorTest(test_case.TestCase, parameterized.TestCase):

   def _build_conv_hyperparams(self):
     conv_hyperparams = hyperparams_pb2.Hyperparams()
@@ -425,10 +403,10 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
     return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
   def _build_feature_map_generator(
-      self, image_features, depth, use_keras, use_bounded_activations=False,
+      self, image_features, depth, use_bounded_activations=False,
       use_native_resize_op=False, use_explicit_padding=False,
       use_depthwise=False):
-    if use_keras:
+    if tf_version.is_tf2():
       return feature_map_generators.KerasFpnTopDownFeatureMaps(
           num_levels=len(image_features),
           depth=depth,
@@ -454,19 +432,20 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
     return feature_map_generator
   def test_get_expected_feature_map_shapes(
-      self, use_native_resize_op, use_keras):
-    image_features = [
-        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
-        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
-        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
-        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
-    ]
-    feature_map_generator = self._build_feature_map_generator(
-        image_features=image_features,
-        depth=128,
-        use_keras=use_keras,
-        use_native_resize_op=use_native_resize_op)
-    feature_maps = feature_map_generator(image_features)
+      self, use_native_resize_op):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = [
+          ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
+          ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
+          ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
+          ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
+      ]
+      feature_map_generator = self._build_feature_map_generator(
+          image_features=image_features,
+          depth=128,
+          use_native_resize_op=use_native_resize_op)
+    def graph_fn():
+      return feature_map_generator(image_features)
     expected_feature_map_shapes = {
         'top_down_block2': (4, 8, 8, 128),
@@ -474,30 +453,27 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
         'top_down_block4': (4, 2, 2, 128),
         'top_down_block5': (4, 1, 1, 128)
     }
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = {key: value.shape
-                                for key, value in out_feature_maps.items()}
-      self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

   def test_get_expected_feature_map_shapes_with_explicit_padding(
-      self, use_native_resize_op, use_keras):
-    image_features = [
-        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
-        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
-        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
-        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
-    ]
-    feature_map_generator = self._build_feature_map_generator(
-        image_features=image_features,
-        depth=128,
-        use_keras=use_keras,
-        use_explicit_padding=True,
-        use_native_resize_op=use_native_resize_op)
-    feature_maps = feature_map_generator(image_features)
+      self, use_native_resize_op):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = [
+          ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
+          ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
+          ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
+          ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
+      ]
+      feature_map_generator = self._build_feature_map_generator(
+          image_features=image_features,
+          depth=128,
+          use_explicit_padding=True,
+          use_native_resize_op=use_native_resize_op)
+    def graph_fn():
+      return feature_map_generator(image_features)
     expected_feature_map_shapes = {
         'top_down_block2': (4, 8, 8, 128),
@@ -505,19 +481,15 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
         'top_down_block4': (4, 2, 2, 128),
         'top_down_block5': (4, 1, 1, 128)
     }
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      out_feature_maps = sess.run(feature_maps)
-      out_feature_map_shapes = {key: value.shape
-                                for key, value in out_feature_maps.items()}
-      self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
+    out_feature_maps = self.execute(graph_fn, [], g)
+    out_feature_map_shapes = dict(
+        (key, value.shape) for key, value in out_feature_maps.items())
+    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

+  @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
   def test_use_bounded_activations_add_operations(
-      self, use_native_resize_op, use_keras):
-    tf_graph = tf.Graph()
-    with tf_graph.as_default():
-      image_features = [('block2',
-                         tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
-                        ('block3',
+      self, use_native_resize_op):
+    with test_utils.GraphContextOrNone() as g:
+      image_features = [('block2',
+                         tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
+                        ('block3',
@@ -529,34 +501,23 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
       feature_map_generator = self._build_feature_map_generator(
           image_features=image_features,
           depth=128,
-          use_keras=use_keras,
           use_bounded_activations=True,
           use_native_resize_op=use_native_resize_op)
-      feature_map_generator(image_features)
-
-    if use_keras:
-      expected_added_operations = dict.fromkeys([
-          'FeatureMaps/top_down/clip_by_value/clip_by_value',
-          'FeatureMaps/top_down/clip_by_value_1/clip_by_value',
-          'FeatureMaps/top_down/clip_by_value_2/clip_by_value',
-          'FeatureMaps/top_down/clip_by_value_3/clip_by_value',
-          'FeatureMaps/top_down/clip_by_value_4/clip_by_value',
-          'FeatureMaps/top_down/clip_by_value_5/clip_by_value',
-          'FeatureMaps/top_down/clip_by_value_6/clip_by_value',
-      ])
-    else:
-      expected_added_operations = dict.fromkeys([
-          'top_down/clip_by_value', 'top_down/clip_by_value_1',
-          'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
-          'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
-          'top_down/clip_by_value_6'
-      ])
-    op_names = {op.name: None for op in tf_graph.get_operations()}
-    self.assertDictContainsSubset(expected_added_operations, op_names)
+    def graph_fn():
+      return feature_map_generator(image_features)
+    self.execute(graph_fn, [], g)
+    expected_added_operations = dict.fromkeys([
+        'top_down/clip_by_value', 'top_down/clip_by_value_1',
+        'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
+        'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
+        'top_down/clip_by_value_6'
+    ])
+    op_names = {op.name: None for op in g.get_operations()}
+    self.assertDictContainsSubset(expected_added_operations, op_names)

+  @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
   def test_use_bounded_activations_clip_value(
-      self, use_native_resize_op, use_keras):
+      self, use_native_resize_op):
     tf_graph = tf.Graph()
     with tf_graph.as_default():
       image_features = [
@@ -568,28 +529,16 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
       feature_map_generator = self._build_feature_map_generator(
           image_features=image_features,
           depth=128,
-          use_keras=use_keras,
           use_bounded_activations=True,
           use_native_resize_op=use_native_resize_op)
       feature_map_generator(image_features)

-      if use_keras:
-        expected_clip_by_value_ops = dict.fromkeys([
-            'FeatureMaps/top_down/clip_by_value/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_1/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_2/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_3/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_4/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_5/clip_by_value',
-            'FeatureMaps/top_down/clip_by_value_6/clip_by_value',
-        ])
-      else:
-        expected_clip_by_value_ops = [
-            'top_down/clip_by_value', 'top_down/clip_by_value_1',
-            'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
-            'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
-            'top_down/clip_by_value_6'
-        ]
+      expected_clip_by_value_ops = [
+          'top_down/clip_by_value', 'top_down/clip_by_value_1',
+          'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
+          'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
+          'top_down/clip_by_value_6'
+      ]

       # Gathers activation tensors before and after clip_by_value operations.
       activations = {}
...@@ -631,20 +580,21 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase): ...@@ -631,20 +580,21 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
self.assertLessEqual(after_clipping_upper_bound, expected_upper_bound) self.assertLessEqual(after_clipping_upper_bound, expected_upper_bound)
def test_get_expected_feature_map_shapes_with_depthwise( def test_get_expected_feature_map_shapes_with_depthwise(
self, use_native_resize_op, use_keras): self, use_native_resize_op):
image_features = [ with test_utils.GraphContextOrNone() as g:
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), image_features = [
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
] ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
feature_map_generator = self._build_feature_map_generator( ]
image_features=image_features, feature_map_generator = self._build_feature_map_generator(
depth=128, image_features=image_features,
use_keras=use_keras, depth=128,
use_depthwise=True, use_depthwise=True,
use_native_resize_op=use_native_resize_op) use_native_resize_op=use_native_resize_op)
feature_maps = feature_map_generator(image_features) def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = { expected_feature_map_shapes = {
'top_down_block2': (4, 8, 8, 128), 'top_down_block2': (4, 8, 8, 128),
...@@ -652,30 +602,27 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase): ...@@ -652,30 +602,27 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
'top_down_block4': (4, 2, 2, 128), 'top_down_block4': (4, 2, 2, 128),
'top_down_block5': (4, 1, 1, 128) 'top_down_block5': (4, 1, 1, 128)
} }
out_feature_maps = self.execute(graph_fn, [], g)
init_op = tf.global_variables_initializer() out_feature_map_shapes = dict(
with self.test_session() as sess: (key, value.shape) for key, value in out_feature_maps.items())
sess.run(init_op) self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
out_feature_maps = sess.run(feature_maps)
out_feature_map_shapes = {key: value.shape
for key, value in out_feature_maps.items()}
self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
def test_get_expected_variable_names( def test_get_expected_variable_names(
self, use_native_resize_op, use_keras): self, use_native_resize_op):
image_features = [ with test_utils.GraphContextOrNone() as g:
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), image_features = [
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
] ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
feature_map_generator = self._build_feature_map_generator( ]
image_features=image_features, feature_map_generator = self._build_feature_map_generator(
depth=128, image_features=image_features,
use_keras=use_keras, depth=128,
use_native_resize_op=use_native_resize_op) use_native_resize_op=use_native_resize_op)
feature_maps = feature_map_generator(image_features) def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_slim_variables = set([ expected_slim_variables = set([
'projection_1/weights', 'projection_1/weights',
'projection_1/biases', 'projection_1/biases',
...@@ -709,33 +656,34 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase): ...@@ -709,33 +656,34 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
'FeatureMaps/top_down/smoothing_3_conv/kernel', 'FeatureMaps/top_down/smoothing_3_conv/kernel',
'FeatureMaps/top_down/smoothing_3_conv/bias' 'FeatureMaps/top_down/smoothing_3_conv/bias'
]) ])
init_op = tf.global_variables_initializer()
with self.test_session() as sess: if tf_version.is_tf2():
sess.run(init_op)
sess.run(feature_maps)
actual_variable_set = set( actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()]) [var.name.split(':')[0] for var in feature_map_generator.variables])
if use_keras: self.assertSetEqual(expected_keras_variables, actual_variable_set)
self.assertSetEqual(expected_keras_variables, actual_variable_set) else:
else: with g.as_default():
self.assertSetEqual(expected_slim_variables, actual_variable_set) actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
self.assertSetEqual(expected_slim_variables, actual_variable_set)
def test_get_expected_variable_names_with_depthwise( def test_get_expected_variable_names_with_depthwise(
self, use_native_resize_op, use_keras): self, use_native_resize_op):
image_features = [ with test_utils.GraphContextOrNone() as g:
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), image_features = [
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
] ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
feature_map_generator = self._build_feature_map_generator( ]
image_features=image_features, feature_map_generator = self._build_feature_map_generator(
depth=128, image_features=image_features,
use_keras=use_keras, depth=128,
use_depthwise=True, use_depthwise=True,
use_native_resize_op=use_native_resize_op) use_native_resize_op=use_native_resize_op)
feature_maps = feature_map_generator(image_features) def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_slim_variables = set([ expected_slim_variables = set([
'projection_1/weights', 'projection_1/weights',
'projection_1/biases', 'projection_1/biases',
...@@ -775,16 +723,16 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase): ...@@ -775,16 +723,16 @@ class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):
'FeatureMaps/top_down/smoothing_3_depthwise_conv/pointwise_kernel', 'FeatureMaps/top_down/smoothing_3_depthwise_conv/pointwise_kernel',
'FeatureMaps/top_down/smoothing_3_depthwise_conv/bias' 'FeatureMaps/top_down/smoothing_3_depthwise_conv/bias'
]) ])
init_op = tf.global_variables_initializer()
with self.test_session() as sess: if tf_version.is_tf2():
sess.run(init_op)
sess.run(feature_maps)
actual_variable_set = set( actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()]) [var.name.split(':')[0] for var in feature_map_generator.variables])
if use_keras: self.assertSetEqual(expected_keras_variables, actual_variable_set)
self.assertSetEqual(expected_keras_variables, actual_variable_set) else:
else: with g.as_default():
self.assertSetEqual(expected_slim_variables, actual_variable_set) actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
self.assertSetEqual(expected_slim_variables, actual_variable_set)
class GetDepthFunctionTest(tf.test.TestCase): class GetDepthFunctionTest(tf.test.TestCase):
...@@ -804,6 +752,7 @@ class GetDepthFunctionTest(tf.test.TestCase): ...@@ -804,6 +752,7 @@ class GetDepthFunctionTest(tf.test.TestCase):
{'replace_pool_with_conv': False}, {'replace_pool_with_conv': False},
{'replace_pool_with_conv': True}, {'replace_pool_with_conv': True},
) )
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class PoolingPyramidFeatureMapGeneratorTest(tf.test.TestCase): class PoolingPyramidFeatureMapGeneratorTest(tf.test.TestCase):
def test_get_expected_feature_map_shapes(self, replace_pool_with_conv): def test_get_expected_feature_map_shapes(self, replace_pool_with_conv):
......
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Write keras weights into a tensorflow checkpoint.
The imagenet weights in `keras.applications` are downloaded from github.
This script converts them into the tensorflow checkpoint format and stores them
on disk where they can be easily accessible during training.
"""
from __future__ import print_function
import os
from absl import app
import numpy as np
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('model', 'resnet_v2_101',
'The model to load. The following are supported: '
'"resnet_v1_50", "resnet_v1_101", "resnet_v2_50", '
'"resnet_v2_101"')
tf.flags.DEFINE_string('output_path', None,
'The directory to output weights in.')
tf.flags.DEFINE_boolean('verify_weights', True,
('Verify the weights are loaded correctly by making '
'sure the predictions are the same before and after '
'saving.'))
def init_model(name):
"""Creates a Keras Model with the specific ResNet version."""
if name == 'resnet_v1_50':
model = tf.keras.applications.ResNet50(weights='imagenet')
elif name == 'resnet_v1_101':
model = tf.keras.applications.ResNet101(weights='imagenet')
elif name == 'resnet_v2_50':
model = tf.keras.applications.ResNet50V2(weights='imagenet')
elif name == 'resnet_v2_101':
model = tf.keras.applications.ResNet101V2(weights='imagenet')
else:
    raise ValueError('Model {} not supported'.format(name))
return model
def main(_):
model = init_model(FLAGS.model)
path = os.path.join(FLAGS.output_path, FLAGS.model)
tf.gfile.MakeDirs(path)
weights_path = os.path.join(path, 'weights')
ckpt = tf.train.Checkpoint(feature_extractor=model)
saved_path = ckpt.save(weights_path)
if FLAGS.verify_weights:
imgs = np.random.randn(1, 224, 224, 3).astype(np.float32)
keras_preds = model(imgs)
model = init_model(FLAGS.model)
    # Point the checkpoint at the freshly built model before restoring, so the
    # save/restore round trip is actually what gets verified.
    ckpt = tf.train.Checkpoint(feature_extractor=model)
    ckpt.restore(saved_path)
loaded_weights_pred = model(imgs).numpy()
if not np.all(np.isclose(keras_preds, loaded_weights_pred)):
raise RuntimeError('The model was not saved correctly.')
if __name__ == '__main__':
tf.enable_v2_behavior()
app.run(main)
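# ------------------------------------------------------------------------------
# Example usage (a sketch, not part of the script above). The script filename
# `convert_keras_models.py` and the output directory are assumptions made for
# illustration; only the flag names come from the definitions above.
#
#   python convert_keras_models.py \
#     --model=resnet_v1_50 \
#     --output_path=/tmp/keras_weights
#
# The saved checkpoint can then be restored into a matching Keras model, e.g.:
#
#   model = tf.keras.applications.ResNet50(weights=None)
#   ckpt = tf.train.Checkpoint(feature_extractor=model)
#   ckpt.restore(tf.train.latest_checkpoint('/tmp/keras_weights/resnet_v1_50'))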
...@@ -13,14 +13,16 @@ ...@@ -13,14 +13,16 @@
# limitations under the License. # limitations under the License.
# ============================================================================== # ==============================================================================
"""Testing the Hourglass network.""" """Testing the Hourglass network."""
import unittest
from absl.testing import parameterized from absl.testing import parameterized
import numpy as np import numpy as np
import tensorflow.compat.v1 as tf import tensorflow.compat.v1 as tf
from object_detection.models.keras_models import hourglass_network as hourglass from object_detection.models.keras_models import hourglass_network as hourglass
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class HourglassFeatureExtractorTest(tf.test.TestCase, parameterized.TestCase): class HourglassFeatureExtractorTest(tf.test.TestCase, parameterized.TestCase):
def test_identity_layer(self): def test_identity_layer(self):
...@@ -95,5 +97,4 @@ class HourglassFeatureExtractorTest(tf.test.TestCase, parameterized.TestCase): ...@@ -95,5 +97,4 @@ class HourglassFeatureExtractorTest(tf.test.TestCase, parameterized.TestCase):
if __name__ == '__main__': if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main() tf.test.main()
...@@ -30,13 +30,14 @@ consistent. ...@@ -30,13 +30,14 @@ consistent.
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import division from __future__ import division
from __future__ import print_function from __future__ import print_function
import unittest
import numpy as np import numpy as np
from six.moves import zip from six.moves import zip
import tensorflow.compat.v1 as tf import tensorflow.compat.v1 as tf
from object_detection.models.keras_models import inception_resnet_v2 from object_detection.models.keras_models import inception_resnet_v2
from object_detection.utils import test_case from object_detection.utils import test_case
from object_detection.utils import tf_version
_KERAS_TO_SLIM_ENDPOINT_NAMES = { _KERAS_TO_SLIM_ENDPOINT_NAMES = {
'activation': 'Conv2d_1a_3x3', 'activation': 'Conv2d_1a_3x3',
...@@ -100,6 +101,7 @@ _NUM_CHANNELS = 3 ...@@ -100,6 +101,7 @@ _NUM_CHANNELS = 3
_BATCH_SIZE = 2 _BATCH_SIZE = 2
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class InceptionResnetV2Test(test_case.TestCase): class InceptionResnetV2Test(test_case.TestCase):
def _create_application_with_layer_outputs( def _create_application_with_layer_outputs(
...@@ -166,8 +168,7 @@ class InceptionResnetV2Test(test_case.TestCase): ...@@ -166,8 +168,7 @@ class InceptionResnetV2Test(test_case.TestCase):
model = self._create_application_with_layer_outputs( model = self._create_application_with_layer_outputs(
layer_names=layer_names, layer_names=layer_names,
batchnorm_training=False) batchnorm_training=False)
preprocessed_inputs = tf.placeholder( preprocessed_inputs = tf.random.uniform([4, 40, 40, _NUM_CHANNELS])
tf.float32, (4, None, None, _NUM_CHANNELS))
model(preprocessed_inputs) model(preprocessed_inputs)
return model.variables return model.variables
......
...@@ -29,7 +29,7 @@ consistent. ...@@ -29,7 +29,7 @@ consistent.
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import division from __future__ import division
from __future__ import print_function from __future__ import print_function
import unittest
import numpy as np import numpy as np
from six.moves import zip from six.moves import zip
import tensorflow.compat.v1 as tf import tensorflow.compat.v1 as tf
...@@ -42,6 +42,7 @@ from object_detection.models.keras_models import model_utils ...@@ -42,6 +42,7 @@ from object_detection.models.keras_models import model_utils
from object_detection.models.keras_models import test_utils from object_detection.models.keras_models import test_utils
from object_detection.protos import hyperparams_pb2 from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case from object_detection.utils import test_case
from object_detection.utils import tf_version
_KERAS_LAYERS_TO_CHECK = [ _KERAS_LAYERS_TO_CHECK = [
'conv1_relu', 'conv1_relu',
...@@ -64,6 +65,7 @@ _NUM_CHANNELS = 3 ...@@ -64,6 +65,7 @@ _NUM_CHANNELS = 3
_BATCH_SIZE = 2 _BATCH_SIZE = 2
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MobilenetV1Test(test_case.TestCase): class MobilenetV1Test(test_case.TestCase):
def _build_conv_hyperparams(self): def _build_conv_hyperparams(self):
...@@ -118,19 +120,17 @@ class MobilenetV1Test(test_case.TestCase): ...@@ -118,19 +120,17 @@ class MobilenetV1Test(test_case.TestCase):
self, image_height, image_width, depth_multiplier, self, image_height, image_width, depth_multiplier,
expected_feature_map_shape, use_explicit_padding=False, min_depth=8, expected_feature_map_shape, use_explicit_padding=False, min_depth=8,
layer_names=None, conv_defs=None): layer_names=None, conv_defs=None):
def graph_fn(image_tensor): model = self._create_application_with_layer_outputs(
model = self._create_application_with_layer_outputs( layer_names=layer_names,
layer_names=layer_names, batchnorm_training=False,
batchnorm_training=False, use_explicit_padding=use_explicit_padding,
use_explicit_padding=use_explicit_padding, min_depth=min_depth,
min_depth=min_depth, alpha=depth_multiplier,
alpha=depth_multiplier, conv_defs=conv_defs)
conv_defs=conv_defs)
return model(image_tensor)
image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width, image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width,
_NUM_CHANNELS).astype(np.float32) _NUM_CHANNELS).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor]) feature_maps = model(image_tensor)
for feature_map, expected_shape in zip(feature_maps, for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shape): expected_feature_map_shape):
...@@ -140,36 +140,29 @@ class MobilenetV1Test(test_case.TestCase): ...@@ -140,36 +140,29 @@ class MobilenetV1Test(test_case.TestCase):
self, image_height, image_width, depth_multiplier, self, image_height, image_width, depth_multiplier,
expected_feature_map_shape, use_explicit_padding=False, min_depth=8, expected_feature_map_shape, use_explicit_padding=False, min_depth=8,
layer_names=None): layer_names=None):
def graph_fn(image_height, image_width): image_tensor = tf.random_uniform([_BATCH_SIZE, image_height, image_width,
image_tensor = tf.random_uniform([_BATCH_SIZE, image_height, image_width, _NUM_CHANNELS], dtype=tf.float32)
_NUM_CHANNELS], dtype=tf.float32) model = self._create_application_with_layer_outputs(
model = self._create_application_with_layer_outputs( layer_names=layer_names,
layer_names=layer_names, batchnorm_training=False,
batchnorm_training=False, use_explicit_padding=use_explicit_padding,
use_explicit_padding=use_explicit_padding, alpha=depth_multiplier)
alpha=depth_multiplier)
return model(image_tensor)
feature_maps = self.execute_cpu(graph_fn, [ feature_maps = model(image_tensor)
np.array(image_height, dtype=np.int32),
np.array(image_width, dtype=np.int32)
])
for feature_map, expected_shape in zip(feature_maps, for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shape): expected_feature_map_shape):
self.assertAllEqual(feature_map.shape, expected_shape) self.assertAllEqual(feature_map.shape, expected_shape)
def _get_variables(self, depth_multiplier, layer_names=None): def _get_variables(self, depth_multiplier, layer_names=None):
g = tf.Graph() tf.keras.backend.clear_session()
with g.as_default(): model = self._create_application_with_layer_outputs(
preprocessed_inputs = tf.placeholder( layer_names=layer_names,
tf.float32, (4, None, None, _NUM_CHANNELS)) batchnorm_training=False, use_explicit_padding=False,
model = self._create_application_with_layer_outputs( alpha=depth_multiplier)
layer_names=layer_names, preprocessed_inputs = tf.random.uniform([2, 40, 40, 3])
batchnorm_training=False, use_explicit_padding=False, model(preprocessed_inputs)
alpha=depth_multiplier) return model.variables
model(preprocessed_inputs)
return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
def test_returns_correct_shapes_128(self): def test_returns_correct_shapes_128(self):
image_height = 128 image_height = 128
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import division from __future__ import division
from __future__ import print_function from __future__ import print_function
import unittest
import numpy as np import numpy as np
from six.moves import zip from six.moves import zip
import tensorflow.compat.v1 as tf import tensorflow.compat.v1 as tf
...@@ -31,6 +31,7 @@ from object_detection.models.keras_models import model_utils ...@@ -31,6 +31,7 @@ from object_detection.models.keras_models import model_utils
from object_detection.models.keras_models import test_utils from object_detection.models.keras_models import test_utils
from object_detection.protos import hyperparams_pb2 from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case from object_detection.utils import test_case
from object_detection.utils import tf_version
_layers_to_check = [ _layers_to_check = [
'Conv1_relu', 'Conv1_relu',
...@@ -53,6 +54,7 @@ _layers_to_check = [ ...@@ -53,6 +54,7 @@ _layers_to_check = [
'out_relu'] 'out_relu']
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MobilenetV2Test(test_case.TestCase): class MobilenetV2Test(test_case.TestCase):
def _build_conv_hyperparams(self): def _build_conv_hyperparams(self):
...@@ -86,6 +88,8 @@ class MobilenetV2Test(test_case.TestCase): ...@@ -86,6 +88,8 @@ class MobilenetV2Test(test_case.TestCase):
min_depth=None, min_depth=None,
conv_defs=None): conv_defs=None):
"""Constructs Keras mobilenetv2 that extracts intermediate layer outputs.""" """Constructs Keras mobilenetv2 that extracts intermediate layer outputs."""
# Have to clear the Keras backend to ensure isolation in layer naming
tf.keras.backend.clear_session()
if not layer_names: if not layer_names:
layer_names = _layers_to_check layer_names = _layers_to_check
full_model = mobilenet_v2.mobilenet_v2( full_model = mobilenet_v2.mobilenet_v2(
...@@ -107,19 +111,17 @@ class MobilenetV2Test(test_case.TestCase): ...@@ -107,19 +111,17 @@ class MobilenetV2Test(test_case.TestCase):
self, batch_size, image_height, image_width, depth_multiplier, self, batch_size, image_height, image_width, depth_multiplier,
expected_feature_map_shapes, use_explicit_padding=False, min_depth=None, expected_feature_map_shapes, use_explicit_padding=False, min_depth=None,
layer_names=None, conv_defs=None): layer_names=None, conv_defs=None):
def graph_fn(image_tensor): model = self._create_application_with_layer_outputs(
model = self._create_application_with_layer_outputs( layer_names=layer_names,
layer_names=layer_names, batchnorm_training=False,
batchnorm_training=False, use_explicit_padding=use_explicit_padding,
use_explicit_padding=use_explicit_padding, min_depth=min_depth,
min_depth=min_depth, alpha=depth_multiplier,
alpha=depth_multiplier, conv_defs=conv_defs)
conv_defs=conv_defs)
return model(image_tensor)
image_tensor = np.random.rand(batch_size, image_height, image_width, image_tensor = np.random.rand(batch_size, image_height, image_width,
3).astype(np.float32) 3).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor]) feature_maps = model([image_tensor])
for feature_map, expected_shape in zip(feature_maps, for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shapes): expected_feature_map_shapes):
...@@ -129,34 +131,30 @@ class MobilenetV2Test(test_case.TestCase): ...@@ -129,34 +131,30 @@ class MobilenetV2Test(test_case.TestCase):
self, batch_size, image_height, image_width, depth_multiplier, self, batch_size, image_height, image_width, depth_multiplier,
expected_feature_map_shapes, use_explicit_padding=False, expected_feature_map_shapes, use_explicit_padding=False,
layer_names=None): layer_names=None):
def graph_fn(image_height, image_width): height = tf.random.uniform([], minval=image_height, maxval=image_height+1,
image_tensor = tf.random_uniform([batch_size, image_height, image_width, dtype=tf.int32)
3], dtype=tf.float32) width = tf.random.uniform([], minval=image_width, maxval=image_width+1,
model = self._create_application_with_layer_outputs( dtype=tf.int32)
layer_names=layer_names, image_tensor = tf.random.uniform([batch_size, height, width,
batchnorm_training=False, use_explicit_padding=use_explicit_padding, 3], dtype=tf.float32)
alpha=depth_multiplier) model = self._create_application_with_layer_outputs(
return model(image_tensor) layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=use_explicit_padding,
feature_maps = self.execute_cpu(graph_fn, [ alpha=depth_multiplier)
np.array(image_height, dtype=np.int32), feature_maps = model(image_tensor)
np.array(image_width, dtype=np.int32)
])
for feature_map, expected_shape in zip(feature_maps, for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shapes): expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape) self.assertAllEqual(feature_map.shape, expected_shape)
def _get_variables(self, depth_multiplier, layer_names=None): def _get_variables(self, depth_multiplier, layer_names=None):
g = tf.Graph() tf.keras.backend.clear_session()
with g.as_default(): model = self._create_application_with_layer_outputs(
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) layer_names=layer_names,
model = self._create_application_with_layer_outputs( batchnorm_training=False, use_explicit_padding=False,
layer_names=layer_names, alpha=depth_multiplier)
batchnorm_training=False, use_explicit_padding=False, preprocessed_inputs = tf.random.uniform([2, 40, 40, 3])
alpha=depth_multiplier) model(preprocessed_inputs)
model(preprocessed_inputs) return model.variables
return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
def test_returns_correct_shapes_128(self): def test_returns_correct_shapes_128(self):
image_height = 128 image_height = 128
......
...@@ -19,7 +19,7 @@ object detection. To verify the consistency of the two models, we compare: ...@@ -19,7 +19,7 @@ object detection. To verify the consistency of the two models, we compare:
1. Output shape of each layer given different inputs. 1. Output shape of each layer given different inputs.
2. Number of global variables. 2. Number of global variables.
""" """
import unittest
import numpy as np import numpy as np
from six.moves import zip from six.moves import zip
import tensorflow.compat.v1 as tf import tensorflow.compat.v1 as tf
...@@ -30,6 +30,7 @@ from object_detection.builders import hyperparams_builder ...@@ -30,6 +30,7 @@ from object_detection.builders import hyperparams_builder
from object_detection.models.keras_models import resnet_v1 from object_detection.models.keras_models import resnet_v1
from object_detection.protos import hyperparams_pb2 from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case from object_detection.utils import test_case
from object_detection.utils import tf_version
_EXPECTED_SHAPES_224_RESNET50 = { _EXPECTED_SHAPES_224_RESNET50 = {
'conv2_block3_out': (4, 56, 56, 256), 'conv2_block3_out': (4, 56, 56, 256),
...@@ -65,6 +66,7 @@ _NUM_CHANNELS = 3 ...@@ -65,6 +66,7 @@ _NUM_CHANNELS = 3
_BATCH_SIZE = 4 _BATCH_SIZE = 4
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ResnetV1Test(test_case.TestCase): class ResnetV1Test(test_case.TestCase):
def _build_conv_hyperparams(self): def _build_conv_hyperparams(self):
...@@ -146,8 +148,7 @@ class ResnetV1Test(test_case.TestCase): ...@@ -146,8 +148,7 @@ class ResnetV1Test(test_case.TestCase):
tf.keras.backend.clear_session() tf.keras.backend.clear_session()
model = self._create_application_with_layer_outputs( model = self._create_application_with_layer_outputs(
model_index, batchnorm_training=False) model_index, batchnorm_training=False)
preprocessed_inputs = tf.placeholder(tf.float32, preprocessed_inputs = tf.random.uniform([2, 40, 40, _NUM_CHANNELS])
(4, None, None, _NUM_CHANNELS))
model(preprocessed_inputs) model(preprocessed_inputs)
return model.variables return model.variables
......