Unverified Commit 4a0b3e4b authored by Liangzhe, committed by GitHub

Merged commit includes the following changes (#8235):



298416930  by lzyuan:

    Explicitly mark base models' state inputs as 'raw_inputs/init_lstm_h_1' and 'raw_inputs_init_lstm_h_2' when pre_bottleneck=True.

--
298380851  by skligys:

    Fix LSTD LSTM cells to use fixed_quantize_op().

--
297662737  by Menglong Zhu:

    Explicitly replace "import tensorflow" with "import tensorflow.compat.v1" for TF2.x migration.

--
289667197  by lzyuan:

    Internal update.

--
288607438  by lzyuan:

    Enforce feature_extractor construction using arg keys.

--

PiperOrigin-RevId: 298416930
Co-authored-by: Menglong Zhu <menglong@google.com>
parent 8b47aa3d
@@ -16,7 +16,7 @@
 """Tests for video_object_detection.metrics.coco_video_evaluation."""
 
 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 
 from lstm_object_detection.metrics import coco_evaluation_all_frames
 from object_detection.core import standard_fields
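
For context, the import swap above (repeated throughout this commit) is the standard TF1-to-TF2 compatibility shim: `tensorflow.compat.v1` exposes the 1.x graph-mode API under the familiar `tf` alias. A minimal sketch, not taken from this repository, showing that graph-mode code keeps running under a TF 2.x install (the `disable_v2_behavior()` call is optional and is not part of this commit):

```python
import tensorflow.compat.v1 as tf

# Optional: also switch off eager execution and other 2.x behaviors globally.
# tf.disable_v2_behavior()

# Inside an explicit Graph context, 1.x-style placeholders and sessions work
# even when the installed TensorFlow package is 2.x.
graph = tf.Graph()
with graph.as_default():
  x = tf.placeholder(tf.float32, shape=[None, 3], name='x')
  row_sum = tf.reduce_sum(x, axis=1)

with tf.Session(graph=graph) as sess:
  print(sess.run(row_sum, feed_dict={x: [[1., 2., 3.]]}))  # [6.]
```
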
@@ -15,7 +15,7 @@
 """Tests for lstm_object_detection.tensorflow.model_builder."""
 
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 
 from google.protobuf import text_format
 from lstm_object_detection import model_builder
 from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch
@@ -15,7 +15,7 @@
 """LSTDInterleavedFeatureExtractor which interleaves multiple MobileNet V2."""
 
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from tensorflow.contrib import slim
 from tensorflow.python.framework import ops as tf_ops
@@ -64,8 +64,15 @@ class LSTMSSDInterleavedMobilenetV2FeatureExtractor(
         `conv_hyperparams_fn`.
     """
     super(LSTMSSDInterleavedMobilenetV2FeatureExtractor, self).__init__(
-        is_training, depth_multiplier, min_depth, pad_to_multiple,
-        conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise,
+        is_training=is_training,
+        depth_multiplier=depth_multiplier,
+        min_depth=min_depth,
+        pad_to_multiple=pad_to_multiple,
+        conv_hyperparams_fn=conv_hyperparams_fn,
+        reuse_weights=reuse_weights,
+        use_explicit_padding=use_explicit_padding,
+        use_depthwise=use_depthwise,
+        override_base_feature_extractor_hyperparams=
         override_base_feature_extractor_hyperparams)
     # RANDOM_SKIP_SMALL means the training policy is random and the small model
     # does not update state during training.
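
The keyword-argument form of the `super().__init__` call above (the "Enforce feature_extractor construction using arg keys" change) protects against arguments silently binding to the wrong parameter if the base-class signature is ever reordered or extended. A small illustrative sketch with hypothetical classes, not the actual SSD feature extractor hierarchy:

```python
class BaseExtractor(object):
  """Toy stand-in; pretend `min_depth` was recently inserted into the signature."""

  def __init__(self, is_training, min_depth, depth_multiplier):
    self._is_training = is_training
    self._min_depth = min_depth
    self._depth_multiplier = depth_multiplier


class PositionalExtractor(BaseExtractor):
  """Written against the old (is_training, depth_multiplier) argument order."""

  def __init__(self, is_training, depth_multiplier):
    # depth_multiplier now silently lands in min_depth -- no error is raised.
    super(PositionalExtractor, self).__init__(is_training, depth_multiplier, 16)


class KeywordExtractor(BaseExtractor):
  """Keyword arguments stay correct (or fail loudly) when the base changes."""

  def __init__(self, is_training, depth_multiplier):
    super(KeywordExtractor, self).__init__(
        is_training=is_training,
        min_depth=16,
        depth_multiplier=depth_multiplier)


print(PositionalExtractor(True, 0.5)._min_depth)  # 0.5 -- wrong, but silent
print(KeywordExtractor(True, 0.5)._min_depth)     # 16
```
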
@@ -17,7 +17,7 @@
 import itertools
 
 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from tensorflow.contrib import slim
 from tensorflow.contrib import training as contrib_training
@@ -60,6 +60,47 @@ class LSTMSSDInterleavedMobilenetV2FeatureExtractorTest(
     feature_extractor.is_quantized = is_quantized
     return feature_extractor
 
+  def test_feature_extractor_construct_with_expected_params(self):
+    def conv_hyperparams_fn():
+      with (slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm) and
+            slim.arg_scope([slim.batch_norm], decay=0.97, epsilon=1e-3)) as sc:
+        return sc
+
+    params = {
+        'is_training': True,
+        'depth_multiplier': .55,
+        'min_depth': 9,
+        'pad_to_multiple': 3,
+        'conv_hyperparams_fn': conv_hyperparams_fn,
+        'reuse_weights': False,
+        'use_explicit_padding': True,
+        'use_depthwise': False,
+        'override_base_feature_extractor_hyperparams': True}
+
+    feature_extractor = (
+        lstm_ssd_interleaved_mobilenet_v2_feature_extractor
+        .LSTMSSDInterleavedMobilenetV2FeatureExtractor(**params))
+
+    self.assertEqual(params['is_training'],
+                     feature_extractor._is_training)
+    self.assertEqual(params['depth_multiplier'],
+                     feature_extractor._depth_multiplier)
+    self.assertEqual(params['min_depth'],
+                     feature_extractor._min_depth)
+    self.assertEqual(params['pad_to_multiple'],
+                     feature_extractor._pad_to_multiple)
+    self.assertEqual(params['conv_hyperparams_fn'],
+                     feature_extractor._conv_hyperparams_fn)
+    self.assertEqual(params['reuse_weights'],
+                     feature_extractor._reuse_weights)
+    self.assertEqual(params['use_explicit_padding'],
+                     feature_extractor._use_explicit_padding)
+    self.assertEqual(params['use_depthwise'],
+                     feature_extractor._use_depthwise)
+    self.assertEqual(params['override_base_feature_extractor_hyperparams'],
+                     (feature_extractor.
+                      _override_base_feature_extractor_hyperparams))
+
   def test_extract_features_returns_correct_shapes_128(self):
     image_height = 128
     image_width = 128
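
A note on the `conv_hyperparams_fn` helper in the new test: the `with (A and B) as sc` expression constructs both `slim.arg_scope` context managers but only enters the second one (the first is merely evaluated for truthiness). That is harmless here because the test only checks that the callable is stored on the extractor. If both scopes actually had to take effect, nesting them would be the usual pattern; an illustrative sketch, not part of this commit:

```python
def conv_hyperparams_fn():
  # Nested `with` blocks apply both the conv2d and the batch_norm defaults.
  with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm):
    with slim.arg_scope([slim.batch_norm], decay=0.97, epsilon=1e-3) as sc:
      return sc
```
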
@@ -15,7 +15,7 @@
 """LSTMSSDFeatureExtractor for MobilenetV1 features."""
 
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from tensorflow.contrib import slim as contrib_slim
 from tensorflow.python.framework import ops as tf_ops
 from lstm_object_detection.lstm import lstm_cells
@@ -66,8 +66,15 @@ class LSTMSSDMobileNetV1FeatureExtractor(
       lstm_state_depth: An integter of the depth of the lstm state.
     """
     super(LSTMSSDMobileNetV1FeatureExtractor, self).__init__(
-        is_training, depth_multiplier, min_depth, pad_to_multiple,
-        conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise,
+        is_training=is_training,
+        depth_multiplier=depth_multiplier,
+        min_depth=min_depth,
+        pad_to_multiple=pad_to_multiple,
+        conv_hyperparams_fn=conv_hyperparams_fn,
+        reuse_weights=reuse_weights,
+        use_explicit_padding=use_explicit_padding,
+        use_depthwise=use_depthwise,
+        override_base_feature_extractor_hyperparams=
         override_base_feature_extractor_hyperparams)
     self._feature_map_layout = {
         'from_layer': ['Conv2d_13_pointwise_lstm', '', '', '', ''],
@@ -16,11 +16,11 @@
 """Tests for models.lstm_ssd_mobilenet_v1_feature_extractor."""
 
 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from tensorflow.contrib import slim as contrib_slim
 from tensorflow.contrib import training as contrib_training
 
-from lstm_object_detection.models import lstm_ssd_mobilenet_v1_feature_extractor as feature_extactor
+from lstm_object_detection.models import lstm_ssd_mobilenet_v1_feature_extractor as feature_extractor
 from object_detection.models import ssd_feature_extractor_test
 
 slim = contrib_slim
@@ -48,7 +48,7 @@ class LstmSsdMobilenetV1FeatureExtractorTest(
     """
     min_depth = 32
     extractor = (
-        feature_extactor.LSTMSSDMobileNetV1FeatureExtractor(
+        feature_extractor.LSTMSSDMobileNetV1FeatureExtractor(
            is_training,
            depth_multiplier,
            min_depth,
@@ -58,6 +58,46 @@ class LstmSsdMobilenetV1FeatureExtractorTest(
     extractor.lstm_state_depth = int(256 * depth_multiplier)
     return extractor
 
+  def test_feature_extractor_construct_with_expected_params(self):
+    def conv_hyperparams_fn():
+      with (slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm) and
+            slim.arg_scope([slim.batch_norm], decay=0.97, epsilon=1e-3)) as sc:
+        return sc
+
+    params = {
+        'is_training': True,
+        'depth_multiplier': .55,
+        'min_depth': 9,
+        'pad_to_multiple': 3,
+        'conv_hyperparams_fn': conv_hyperparams_fn,
+        'reuse_weights': False,
+        'use_explicit_padding': True,
+        'use_depthwise': False,
+        'override_base_feature_extractor_hyperparams': True}
+
+    extractor = (
+        feature_extractor.LSTMSSDMobileNetV1FeatureExtractor(**params))
+
+    self.assertEqual(params['is_training'],
+                     extractor._is_training)
+    self.assertEqual(params['depth_multiplier'],
+                     extractor._depth_multiplier)
+    self.assertEqual(params['min_depth'],
+                     extractor._min_depth)
+    self.assertEqual(params['pad_to_multiple'],
+                     extractor._pad_to_multiple)
+    self.assertEqual(params['conv_hyperparams_fn'],
+                     extractor._conv_hyperparams_fn)
+    self.assertEqual(params['reuse_weights'],
+                     extractor._reuse_weights)
+    self.assertEqual(params['use_explicit_padding'],
+                     extractor._use_explicit_padding)
+    self.assertEqual(params['use_depthwise'],
+                     extractor._use_depthwise)
+    self.assertEqual(params['override_base_feature_extractor_hyperparams'],
+                     (extractor.
+                      _override_base_feature_extractor_hyperparams))
+
   def test_extract_features_returns_correct_shapes_256(self):
     image_height = 256
     image_width = 256
@@ -87,8 +127,8 @@ class LstmSsdMobilenetV1FeatureExtractorTest(
   def test_preprocess_returns_correct_value_range(self):
     test_image = np.random.rand(5, 128, 128, 3)
-    feature_extractor = self._create_feature_extractor()
-    preprocessed_image = feature_extractor.preprocess(test_image)
+    extractor = self._create_feature_extractor()
+    preprocessed_image = extractor.preprocess(test_image)
     self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
 
   def test_variables_only_created_in_scope(self):
@@ -96,8 +136,8 @@ class LstmSsdMobilenetV1FeatureExtractorTest(
     g = tf.Graph()
     with g.as_default():
       preprocessed_inputs = tf.placeholder(tf.float32, (5, 256, 256, 3))
-      feature_extractor = self._create_feature_extractor()
-      feature_extractor.extract_features(preprocessed_inputs)
+      extractor = self._create_feature_extractor()
+      extractor.extract_features(preprocessed_inputs)
       variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
       find_scope = False
       for variable in variables:
@@ -122,10 +162,10 @@ class LstmSsdMobilenetV1FeatureExtractorTest(
           input_context={},
          initial_states=init_state,
          capacity=1)
-      feature_extractor = self._create_feature_extractor()
+      extractor = self._create_feature_extractor()
      image = tf.random_uniform([5, 256, 256, 3])
      with tf.variable_scope('zero_state'):
-        feature_map = feature_extractor.extract_features(
+        feature_map = extractor.extract_features(
            image, stateful_reader.next_batch)
      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
@@ -14,7 +14,7 @@
 # ==============================================================================
 """Definitions for modified MobileNet models used in LSTD."""
 
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from tensorflow.contrib import slim as contrib_slim
 
 from nets import mobilenet_v1
@@ -18,7 +18,7 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from lstm_object_detection.models import mobilenet_defs
 from nets import mobilenet_v1
 from nets.mobilenet import mobilenet_v2
@@ -18,7 +18,7 @@
 from __future__ import print_function
 from absl import flags
 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 
 flags.DEFINE_string('model_path', None, 'Path to model.')
 FLAGS = flags.FLAGS
@@ -50,7 +50,9 @@ cc_library(
         "//utils:ssd_utils",
     ] + select({
         "//conditions:default": [],
-        "enable_edgetpu": ["@libedgetpu//libedgetpu:header"],
+        "enable_edgetpu": [
+            "@libedgetpu//libedgetpu:header",
+        ],
     }),
     alwayslink = 1,
 )
@@ -71,7 +73,9 @@ cc_library(
         "@org_tensorflow//tensorflow/lite/kernels:builtin_ops",
     ] + select({
         "//conditions:default": [],
-        "enable_edgetpu": ["@libedgetpu//libedgetpu:header"],
+        "enable_edgetpu": [
+            "@libedgetpu//libedgetpu:header",
+        ],
     }),
     alwayslink = 1,
 )
@@ -22,6 +22,12 @@ http_archive(
     strip_prefix = "abseil-cpp-a02f62f456f2c4a7ecf2be3104fe0c6e16fbad9a",
 )
 
+http_archive(
+    name = "rules_cc",
+    strip_prefix = "rules_cc-master",
+    urls = ["https://github.com/bazelbuild/rules_cc/archive/master.zip"],
+)
+
 # GoogleTest/GoogleMock framework. Used by most unit-tests.
 http_archive(
     name = "com_google_googletest",
@@ -90,6 +96,12 @@ http_archive(
     sha256 = "79d102c61e2a479a0b7e5fc167bcfaa4832a0c6aad4a75fa7da0480564931bcc",
 )
 
+#
+# http_archive(
+#     name = "com_google_protobuf",
+#     strip_prefix = "protobuf-master",
+#     urls = ["https://github.com/protocolbuffers/protobuf/archive/master.zip"],
+# )
+
 # Needed by TensorFlow
 http_archive(
@@ -46,7 +46,7 @@ import functools
 import json
 import os
 from absl import flags
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from lstm_object_detection import model_builder
 from lstm_object_detection import trainer
 from lstm_object_detection.inputs import seq_dataset_builder
@@ -20,7 +20,8 @@ DetectionModel.
 """
 
 import functools
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
+from tensorflow.contrib import slim as contrib_slim
 
 from object_detection.builders import optimizer_builder
 from object_detection.core import standard_fields as fields
@@ -28,7 +29,7 @@ from object_detection.utils import ops as util_ops
 from object_detection.utils import variables_helper
 
 from deployment import model_deploy
 
-slim = tf.contrib.slim
+slim = contrib_slim
 
 def create_input_queue(create_tensor_dict_fn):
@@ -19,7 +19,7 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from google.protobuf import text_format
 
 from lstm_object_detection.protos import input_reader_google_pb2  # pylint: disable=unused-import
@@ -20,7 +20,7 @@ from __future__ import division
 from __future__ import print_function
 
 import os
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from google.protobuf import text_format
 
 from lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2