Unverified Commit 4a0b3e4b authored by Liangzhe's avatar Liangzhe Committed by GitHub
Browse files

Merged commit includes the following changes: (#8235)



298416930  by lzyuan:

    Explicitly mark base models' state inputs as 'raw_inputs/init_lstm_h_1' and 'raw_inputs/init_lstm_h_2' when pre_bottleneck=True.

--
298380851  by skligys:

    Fix LSTD LSTM cells to use fixed_quantize_op().

--
297662737  by Menglong Zhu:

    Explicitly replace "import tensorflow" with "tensorflow.compat.v1" for TF2.x migration

--
289667197  by lzyuan:

    Internal update.

--
288607438  by lzyuan:

    Enforce feature_extractor construction using arg keys.

--

PiperOrigin-RevId: 298416930
Co-authored-by: Menglong Zhu <menglong@google.com>
parent 8b47aa3d
......@@ -21,7 +21,9 @@ customization of freeze_bn_delay.
"""
import re
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.contrib.quantize.python import quant_ops
......@@ -72,17 +74,18 @@ def build(graph_rewriter_config,
# Quantize the graph by inserting quantize ops for weights and activations
if is_training:
tf.contrib.quantize.experimental_create_training_graph(
contrib_quantize.experimental_create_training_graph(
input_graph=graph,
quant_delay=graph_rewriter_config.quantization.delay,
freeze_bn_delay=graph_rewriter_config.quantization.delay)
else:
tf.contrib.quantize.experimental_create_eval_graph(
contrib_quantize.experimental_create_eval_graph(
input_graph=graph,
quant_delay=graph_rewriter_config.quantization.delay
if not is_export else 0)
tf.contrib.layers.summarize_collection('quant_vars')
contrib_layers.summarize_collection('quant_vars')
return graph_rewrite_fn
......
......@@ -15,7 +15,9 @@
"""Tests for graph_rewriter_builder."""
import mock
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from lstm_object_detection.builders import graph_rewriter_builder
......@@ -27,9 +29,9 @@ class QuantizationBuilderTest(tf.test.TestCase):
def testQuantizationBuilderSetsUpCorrectTrainArguments(self):
with mock.patch.object(
tf.contrib.quantize,
contrib_quantize,
'experimental_create_training_graph') as mock_quant_fn:
with mock.patch.object(tf.contrib.layers,
with mock.patch.object(contrib_layers,
'summarize_collection') as mock_summarize_col:
graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_proto.quantization.delay = 10
......@@ -44,9 +46,9 @@ class QuantizationBuilderTest(tf.test.TestCase):
mock_summarize_col.assert_called_with('quant_vars')
def testQuantizationBuilderSetsUpCorrectEvalArguments(self):
with mock.patch.object(tf.contrib.quantize,
with mock.patch.object(contrib_quantize,
'experimental_create_eval_graph') as mock_quant_fn:
with mock.patch.object(tf.contrib.layers,
with mock.patch.object(contrib_layers,
'summarize_collection') as mock_summarize_col:
graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_proto.quantization.delay = 10
......
......@@ -25,7 +25,7 @@ This executable is used to evaluate DetectionModels. Example usage:
import functools
import os
import tensorflow as tf
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from lstm_object_detection import evaluator
from lstm_object_detection import model_builder
......
......@@ -20,7 +20,8 @@ DetectionModel.
"""
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib import tfprof as contrib_tfprof
from lstm_object_detection.metrics import coco_evaluation_all_frames
from object_detection import eval_util
from object_detection.core import prefetcher
......@@ -105,13 +106,13 @@ def _extract_prediction_tensors(model,
detections = _create_detection_op(model, input_dict, batch)
# Print out analysis of the model.
tf.contrib.tfprof.model_analyzer.print_model_analysis(
contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=tf.contrib.tfprof.model_analyzer.
TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
tf.contrib.tfprof.model_analyzer.print_model_analysis(
tfprof_options=contrib_tfprof.model_analyzer
.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=tf.contrib.tfprof.model_analyzer.FLOAT_OPS_OPTIONS)
tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS)
num_frames = len(input_dict[fields.InputDataFields.image])
ret = []
......
......@@ -84,7 +84,7 @@ python lstm_object_detection/export_tflite_lstd_graph.py \
"
"""
import tensorflow as tf
import tensorflow.compat.v1 as tf
from lstm_object_detection import export_tflite_lstd_graph_lib
from lstm_object_detection.utils import config_util
......
......@@ -20,7 +20,7 @@ import os
import tempfile
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
......
......@@ -17,7 +17,7 @@
import os
from absl import flags
import tensorflow as tf
import tensorflow.compat.v1 as tf
from lstm_object_detection.utils import config_util
......
......@@ -22,7 +22,8 @@ Note: If users wishes to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim as contrib_slim
from tensorflow.contrib.training.python.training import sequence_queueing_state_saver as sqss
from lstm_object_detection.inputs import tf_sequence_example_decoder
from lstm_object_detection.protos import input_reader_google_pb2
......@@ -32,7 +33,7 @@ from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2
from object_detection.utils import ops as util_ops
parallel_reader = tf.contrib.slim.parallel_reader
parallel_reader = contrib_slim.parallel_reader
# TODO(yinxiao): Make the following variable into configurable proto.
# Padding size for the labeled objects in each frame. Here we assume each
# frame has a total number of objects less than _PADDING_SIZE.
......
......@@ -17,7 +17,7 @@
import os
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
......@@ -33,6 +33,68 @@ from object_detection.protos import preprocessor_pb2
class DatasetBuilderTest(tf.test.TestCase):
def _create_tf_record(self):
  """Writes a one-frame TFRecord containing a serialized SequenceExample.

  The example holds a single random 16x16 RGB JPEG frame with one
  groundtruth box spanning the full image and class label 2.

  Returns:
    Path to the temporary TFRecord file that was written.
  """
  path = os.path.join(self.get_temp_dir(), 'tfrecord')
  writer = tf.python_io.TFRecordWriter(path)

  image_tensor = np.random.randint(255, size=(16, 16, 3)).astype(np.uint8)
  with self.test_session():
    # Evaluate eagerly so the raw JPEG bytes can be embedded in the proto.
    encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()

  sequence_example = example_pb2.SequenceExample(
      # Per-sequence (static) metadata: image format and dimensions.
      context=feature_pb2.Features(
          feature={
              'image/format':
                  feature_pb2.Feature(
                      bytes_list=feature_pb2.BytesList(
                          value=['jpeg'.encode('utf-8')])),
              'image/height':
                  feature_pb2.Feature(
                      int64_list=feature_pb2.Int64List(value=[16])),
              'image/width':
                  feature_pb2.Feature(
                      int64_list=feature_pb2.Int64List(value=[16])),
          }),
      # Per-frame data: encoded image, box corner coordinates (normalized
      # to [0, 1]), and the object class label.
      feature_lists=feature_pb2.FeatureLists(
          feature_list={
              'image/encoded':
                  feature_pb2.FeatureList(feature=[
                      feature_pb2.Feature(
                          bytes_list=feature_pb2.BytesList(
                              value=[encoded_jpeg])),
                  ]),
              'image/object/bbox/xmin':
                  feature_pb2.FeatureList(feature=[
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=[0.0])),
                  ]),
              'image/object/bbox/xmax':
                  feature_pb2.FeatureList(feature=[
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=[1.0]))
                  ]),
              'image/object/bbox/ymin':
                  feature_pb2.FeatureList(feature=[
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=[0.0])),
                  ]),
              'image/object/bbox/ymax':
                  feature_pb2.FeatureList(feature=[
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=[1.0]))
                  ]),
              'image/object/class/label':
                  feature_pb2.FeatureList(feature=[
                      feature_pb2.Feature(
                          int64_list=feature_pb2.Int64List(value=[2]))
                  ]),
          }))
  writer.write(sequence_example.SerializeToString())
  writer.close()
  return path
def _get_model_configs_from_proto(self):
"""Creates a model text proto for testing.
......
......@@ -18,11 +18,12 @@
A decoder to decode string tensors containing serialized
tensorflow.SequenceExample protos.
"""
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim as contrib_slim
from object_detection.core import data_decoder
from object_detection.core import standard_fields as fields
tfexample_decoder = tf.contrib.slim.tfexample_decoder
tfexample_decoder = contrib_slim.tfexample_decoder
class BoundingBoxSequence(tfexample_decoder.ItemHandler):
......
......@@ -16,7 +16,7 @@
"""Tests for lstm_object_detection.tf_sequence_example_decoder."""
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import dtypes
......
......@@ -14,7 +14,7 @@
# ==============================================================================
"""BottleneckConvLSTMCell implementation."""
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import rnn as contrib_rnn
......@@ -494,11 +494,10 @@ class GroupedConvLSTMCell(contrib_rnn.RNNCell):
f_act = tf.sigmoid(f_add)
# The quantization range is fixed for the sigmoid to ensure that zero
# is exactly representable.
f_act = lstm_utils.quantize_op(
f_act = lstm_utils.fixed_quantize_op(
f_act,
is_training=False,
default_min=0,
default_max=1,
fixed_min=0.0,
fixed_max=1.0,
is_quantized=self._is_quantized,
scope='forget_gate_%d/act_quant' % k)
......@@ -512,22 +511,20 @@ class GroupedConvLSTMCell(contrib_rnn.RNNCell):
i_act = tf.sigmoid(i)
# The quantization range is fixed for the sigmoid to ensure that zero
# is exactly representable.
i_act = lstm_utils.quantize_op(
i_act = lstm_utils.fixed_quantize_op(
i_act,
is_training=False,
default_min=0,
default_max=1,
fixed_min=0.0,
fixed_max=1.0,
is_quantized=self._is_quantized,
scope='input_gate_%d/act_quant' % k)
j_act = self._activation(j)
# The quantization range is fixed for the relu6 to ensure that zero
# is exactly representable.
j_act = lstm_utils.quantize_op(
j_act = lstm_utils.fixed_quantize_op(
j_act,
is_training=False,
default_min=0,
default_max=6,
fixed_min=0.0,
fixed_max=6.0,
is_quantized=self._is_quantized,
scope='new_input_%d/act_quant' % k)
......@@ -546,11 +543,10 @@ class GroupedConvLSTMCell(contrib_rnn.RNNCell):
# to the concat have the same range, removing the need for rescaling.
# The quantization ranges input to the relu6 are propagated to its
# output. Any mismatch between these two ranges will cause an error.
new_c = lstm_utils.quantize_op(
new_c = lstm_utils.fixed_quantize_op(
new_c,
is_training=False,
default_min=0,
default_max=6,
fixed_min=0.0,
fixed_max=6.0,
is_quantized=self._is_quantized,
scope='new_c_%d/add_quant' % k)
......@@ -565,22 +561,20 @@ class GroupedConvLSTMCell(contrib_rnn.RNNCell):
new_c_act = self._activation(new_c)
# The quantization range is fixed for the relu6 to ensure that zero
# is exactly representable.
new_c_act = lstm_utils.quantize_op(
new_c_act = lstm_utils.fixed_quantize_op(
new_c_act,
is_training=False,
default_min=0,
default_max=6,
fixed_min=0.0,
fixed_max=6.0,
is_quantized=self._is_quantized,
scope='new_c_%d/act_quant' % k)
o_act = tf.sigmoid(o)
# The quantization range is fixed for the sigmoid to ensure that zero
# is exactly representable.
o_act = lstm_utils.quantize_op(
o_act = lstm_utils.fixed_quantize_op(
o_act,
is_training=False,
default_min=0,
default_max=1,
fixed_min=0.0,
fixed_max=1.0,
is_quantized=self._is_quantized,
scope='output_%d/act_quant' % k)
......@@ -588,11 +582,10 @@ class GroupedConvLSTMCell(contrib_rnn.RNNCell):
# The quantization range is fixed since it is input to a concat.
# A range of [0, 6] is used since |new_h| is a product of ranges [0, 6]
# and [0, 1].
new_h_act = lstm_utils.quantize_op(
new_h_act = lstm_utils.fixed_quantize_op(
new_h,
is_training=False,
default_min=0,
default_max=6,
fixed_min=0.0,
fixed_max=6.0,
is_quantized=self._is_quantized,
scope='new_h_%d/act_quant' % k)
......@@ -710,7 +703,8 @@ class GroupedConvLSTMCell(contrib_rnn.RNNCell):
raise ValueError('Expect rank 2 state tensor when flatten_state is set.')
with tf.name_scope(None):
state = tf.identity(state, name='raw_inputs/init_lstm_h')
state = tf.identity(
state, name='raw_inputs/init_lstm_h_%d' % (input_index + 1))
if self._flatten_state:
batch_size = inputs.shape[0]
height = inputs.shape[1]
......
......@@ -19,7 +19,7 @@ from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from lstm_object_detection.lstm import lstm_cells
......
......@@ -15,7 +15,7 @@
"""Custom RNN decoder."""
import tensorflow as tf
import tensorflow.compat.v1 as tf
import lstm_object_detection.lstm.utils as lstm_utils
......
......@@ -20,7 +20,7 @@ from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import rnn as contrib_rnn
......
......@@ -18,7 +18,7 @@
from __future__ import absolute_import
from __future__ import division
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
from tensorflow.python.training import moving_averages
......@@ -204,11 +204,17 @@ def quantize_op(inputs,
Returns:
Tensor resulting from quantizing the input tensors.
"""
if is_quantized:
if not is_quantized:
return inputs
with tf.variable_scope(scope):
min_var = _quant_var('min', default_min)
max_var = _quant_var('max', default_max)
if is_training:
if not is_training:
# Just use variables in the checkpoint.
return tf.fake_quant_with_min_max_vars(inputs, min_var, max_var)
# While training, collect EMAs of ranges seen, store in min_var, max_var.
# TFLite requires that 0.0 is always in the [min; max] range.
range_min = tf.minimum(tf.reduce_min(inputs), 0.0, 'SafeQuantRangeMin')
range_max = tf.maximum(tf.reduce_max(inputs), 0.0, 'SafeQuantRangeMax')
......@@ -216,7 +222,26 @@ def quantize_op(inputs,
min_var, range_min, ema_decay, name='AssignMinEma')
max_val = moving_averages.assign_moving_average(
max_var, range_max, ema_decay, name='AssignMaxEma')
inputs = tf.fake_quant_with_min_max_vars(inputs, min_val, max_val)
else:
inputs = tf.fake_quant_with_min_max_vars(inputs, min_var, max_var)
return tf.fake_quant_with_min_max_vars(inputs, min_val, max_val)
def fixed_quantize_op(inputs, is_quantized=True,
                      fixed_min=0.0, fixed_max=6.0, scope='quant'):
  """Inserts a fake quantization op with a fixed range after inputs.

  Unlike quantize_op, no min/max variables or moving averages are created;
  the quantization range is the constant [fixed_min, fixed_max].

  Args:
    inputs: A tensor of size [batch_size, height, width, channels].
    is_quantized: flag to enable/disable quantization.
    fixed_min: fixed min value for fake quant op.
    fixed_max: fixed max value for fake quant op.
    scope: Optional scope for variable_scope.

  Returns:
    Tensor resulting from quantizing the input tensors.
  """
  if is_quantized:
    with tf.variable_scope(scope):
      # The range is constant, so the args (not vars) fake-quant op is used.
      inputs = tf.fake_quant_with_min_max_args(inputs, fixed_min, fixed_max)
  return inputs
......@@ -18,7 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.compat.v1 as tf
from lstm_object_detection.lstm import utils
......@@ -73,13 +73,20 @@ class QuantizableUtilsTest(tf.test.TestCase):
self._check_min_max_ema(tf.get_default_graph())
self._check_min_max_vars(tf.get_default_graph())
def test_quantize_op_inferene(self):
def test_quantize_op_inference(self):
  """Inference-mode quantize_op keeps min/max vars but adds no EMA ops."""
  inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32)
  outputs = utils.quantize_op(inputs, is_training=False)
  self.assertAllEqual(outputs.shape.as_list(), inputs.shape.as_list())
  graph = tf.get_default_graph()
  self._check_no_min_max_ema(graph)
  self._check_min_max_vars(graph)
def test_fixed_quantize_op(self):
  """fixed_quantize_op adds neither min/max EMAs nor min/max variables."""
  inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32)
  outputs = utils.fixed_quantize_op(inputs)
  self.assertAllEqual(outputs.shape.as_list(), inputs.shape.as_list())
  graph = tf.get_default_graph()
  self._check_no_min_max_ema(graph)
  self._check_no_min_max_vars(graph)
def _check_min_max_vars(self, graph):
op_types = [op.type for op in graph.get_operations()]
self.assertTrue(
......
......@@ -24,7 +24,8 @@ for details.
"""
import abc
import re
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim as contrib_slim
from object_detection.core import box_list_ops
from object_detection.core import matcher
......@@ -33,7 +34,7 @@ from object_detection.meta_architectures import ssd_meta_arch
from object_detection.utils import ops
from object_detection.utils import shape_utils
slim = tf.contrib.slim
slim = contrib_slim
class LSTMSSDMetaArch(ssd_meta_arch.SSDMetaArch):
......
......@@ -22,7 +22,8 @@ from __future__ import print_function
import functools
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim as contrib_slim
from lstm_object_detection.lstm import lstm_cells
from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch
......@@ -38,7 +39,7 @@ from object_detection.utils import test_case
from object_detection.utils import test_utils
slim = tf.contrib.slim
slim = contrib_slim
MAX_TOTAL_NUM_BOXES = 5
NUM_CLASSES = 1
......
......@@ -15,7 +15,7 @@
"""Class for evaluating video object detections with COCO metrics."""
import tensorflow as tf
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.metrics import coco_evaluation
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment