Unverified Commit 451906e4 authored by pkulzc, committed by GitHub

Release MobileDet code and model, and require tf_slim installation for OD API. (#8562)



* Merged commit includes the following changes:
311933687  by Sergio Guadarrama:

    Removes spurious use of tf.compat.v2, which results in spurious tf.compat.v1.compat.v2. Adds a basic test to nasnet_utils.
    Replaces all remaining `import tensorflow as tf` with `import tensorflow.compat.v1 as tf`

--
311766063  by Sergio Guadarrama:

    Removes explicit tf.compat.v1 in all call sites (we already import tf.compat.v1, so this code was effectively doing tf.compat.v1.compat.v1). The existing code worked in the latest TensorFlow releases (2.2 and 1.15) but not in 1.14 or 2.0.0a; this CL fixes it.
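
    For context, the failure mode looks like this (a minimal sketch; the placeholder call is only illustrative):

    ```python
    import tensorflow.compat.v1 as tf

    # With the aliased import above, spelling the prefix out again resolves to
    # tf.compat.v1.compat.v1.*; per the commit message, that attribute chain
    # exists in TF 1.15/2.2 but not in 1.14 or 2.0.0a:
    # x = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))

    # Dropping the redundant prefix works across all of those releases:
    x = tf.placeholder(tf.float32, shape=(None, 4))
    ```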

--
311624958  by Sergio Guadarrama:

    Updates a README that doesn't render properly in GitHub documentation

--
310980959  by Sergio Guadarrama:

    Moves research_models/slim off tf.contrib.slim/layers/framework to tf_slim
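
    The mechanical change, visible throughout the diff below, is a one-line import swap (sketch):

    ```python
    # Before: TF1-only; tf.contrib was removed in TF 2.x.
    # from tensorflow.contrib import slim as contrib_slim

    # After: standalone pip package (tf_slim), works on TF 1.15 and 2.x.
    import tf_slim as slim
    ```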

--
310263156  by Sergio Guadarrama:

    Adds model breakdown for MobilenetV3

--
308640516  by Sergio Guadarrama:

    Internal change

308244396  by Sergio Guadarrama:

    GroupNormalization support for MobilenetV3.

--
307475800  by Sergio Guadarrama:

    Internal change

--
302077708  by Sergio Guadarrama:

    Remove `disable_tf2` behavior from slim py_library targets

--
301208453  by Sergio Guadarrama:

    Automated refactoring to make code Python 3 compatible.

--
300816672  by Sergio Guadarrama:

    Internal change

299433840  by Sergio Guadarrama:

    Internal change

299221609  by Sergio Guadarrama:

    Explicitly disable TensorFlow v2 behaviors for all TF1.x binaries and tests
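
    The standard pattern for this is the stock TF API shown below (for clarity; the exact call sites are internal):

    ```python
    import tensorflow.compat.v1 as tf

    # Keep TF1.x semantics (graph mode, v1 variable behavior) even when the
    # installed TensorFlow is 2.x.
    tf.disable_v2_behavior()
    ```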

--
299179617  by Sergio Guadarrama:

    Internal change

299040784  by Sergio Guadarrama:

    Internal change

299036699  by Sergio Guadarrama:

    Internal change

298736510  by Sergio Guadarrama:

    Internal change

298732599  by Sergio Guadarrama:

    Internal change

298729507  by Sergio Guadarrama:

    Internal change

298253328  by Sergio Guadarrama:

    Internal change

297788346  by Sergio Guadarrama:

    Internal change

297785278  by Sergio Guadarrama:

    Internal change

297783127  by Sergio Guadarrama:

    Internal change

297725870  by Sergio Guadarrama:

    Internal change

297721811  by Sergio Guadarrama:

    Internal change

297711347  by Sergio Guadarrama:

    Internal change

297708059  by Sergio Guadarrama:

    Internal change

297701831  by Sergio Guadarrama:

    Internal change

297700038  by Sergio Guadarrama:

    Internal change

297670468  by Sergio Guadarrama:

    Internal change.

--
297350326  by Sergio Guadarrama:

    Explicitly replace "import tensorflow" with "tensorflow.compat.v1" for TF2.x migration

--
297201668  by Sergio Guadarrama:

    Explicitly replace "import tensorflow" with "tensorflow.compat.v1" for TF2.x migration

--
294483372  by Sergio Guadarrama:

    Internal change

PiperOrigin-RevId: 311933687

* Merged commit includes the following changes:
312578615  by Menglong Zhu:

    Modify the LSTM feature extractors to be Python 3 compatible.

--
311264357  by Menglong Zhu:

    Removes contrib.slim

--
308957207  by Menglong Zhu:

    Automated refactoring to make code Python 3 compatible.

--
306976470  by yongzhe:

    Internal change

306777559  by Menglong Zhu:

    Internal change

--
299232507  by lzyuan:

    Internal update.

--
299221735  by lzyuan:

    Add small epsilon on max_range for quantize_op to prevent range collapse.
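
    A minimal sketch of the idea, assuming a helper of this shape (the name and epsilon value are illustrative, not the actual quantize_op change):

    ```python
    import tensorflow.compat.v1 as tf

    def pad_max_range(min_range, max_range, epsilon=1e-6):
      # If min_range == max_range, the quantization range collapses to zero
      # width; padding max_range keeps the range strictly non-degenerate.
      return tf.maximum(max_range, min_range + epsilon)
    ```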

--

PiperOrigin-RevId: 312578615

* Merged commit includes the following changes:
310447280  by lzc:

    Internal changes.

--

PiperOrigin-RevId: 310447280
Co-authored-by: Sergio Guadarrama <sguada@google.com>
Co-authored-by: Menglong Zhu <menglong@google.com>
parent 73b5be67
......@@ -20,7 +20,7 @@ See export_tflite_ssd_graph.py for usage.
import os
import tempfile
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import saver_pb2
......
......@@ -20,7 +20,9 @@ from __future__ import print_function
import os
import numpy as np
import six
import tensorflow as tf
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.core.framework import types_pb2
from object_detection import export_tflite_ssd_graph_lib
from object_detection import exporter
......@@ -32,11 +34,6 @@ from object_detection.protos import pipeline_pb2
from object_detection.protos import post_processing_pb2
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import slim as contrib_slim
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
if six.PY2:
import mock
......@@ -54,7 +51,7 @@ class FakeModel(model.DetectionModel):
pass
def predict(self, preprocessed_inputs, true_image_shapes):
features = contrib_slim.conv2d(preprocessed_inputs, 3, 1)
features = slim.conv2d(preprocessed_inputs, 3, 1)
with tf.control_dependencies([features]):
prediction_tensors = {
'box_encodings':
......
......@@ -16,7 +16,8 @@
"""Functions to export object detection inference graph."""
import os
import tempfile
import tensorflow as tf
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.tools import freeze_graph # pylint: disable=g-direct-tensorflow-import
from object_detection.builders import graph_rewriter_builder
......@@ -28,7 +29,6 @@ from object_detection.utils import shape_utils
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import slim
from tensorflow.contrib import tfprof as contrib_tfprof
from tensorflow.contrib.quantize.python import graph_matcher
except ImportError:
......
......@@ -21,7 +21,7 @@ from __future__ import print_function
import os
import numpy as np
import six
import tensorflow as tf
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
......@@ -42,7 +42,7 @@ else:
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import slim as contrib_slim
import tf_slim as slim
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
......@@ -1092,7 +1092,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
g = tf.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8))
x_conv = contrib_slim.conv2d(x, 8, 1)
x_conv = slim.conv2d(x, 8, 1)
y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8))
s = ops.nearest_neighbor_upsampling(x_conv, 2)
t = s + y
......@@ -1137,7 +1137,7 @@ class ExportInferenceGraphTest(tf.test.TestCase):
g = tf.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8))
x_conv = contrib_slim.conv2d(x, 8, 1)
x_conv = slim.conv2d(x, 8, 1)
s = ops.nearest_neighbor_upsampling(x_conv, 2)
t = s[:, :19, :19, :]
......
......@@ -109,14 +109,21 @@ Note: If you download the tar.gz file of quantized models and un-tar, you will g
Model name | Pixel 1 Latency (ms) | COCO mAP | Outputs
------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------: | :------: | :-----:
[ssd_mobilenet_v2_mnasfpn_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_mnasfpn_shared_box_predictor_320x320_coco_sync_2020_05_06.tar.gz) | 183 | 26.6 | Boxes
[ssd_mobilenet_v3_large_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v3_large_coco_2020_01_14.tar.gz) | 119 | 22.6 | Boxes
[ssd_mobilenet_v3_small_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v3_small_coco_2020_01_14.tar.gz) | 43 | 15.4 | Boxes
[ssd_mobiledet_cpu_coco](http://download.tensorflow.org/models/object_detection/ssdlite_mobiledet_cpu_320x320_coco_2020_05_19.tar.gz) | 113 | 24.0 | Boxes
[ssd_mobilenet_v2_mnasfpn_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_mnasfpn_shared_box_predictor_320x320_coco_sync_2020_05_18.tar.gz) | 183 | 26.6 | Boxes
[ssd_mobilenet_v3_large_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v3_large_coco_2020_01_14.tar.gz) | 119 | 22.6 | Boxes
[ssd_mobilenet_v3_small_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v3_small_coco_2020_01_14.tar.gz) | 43 | 15.4 | Boxes
### Pixel4 Edge TPU models
Model name | Pixel 4 Edge TPU Latency (ms) | COCO mAP | Outputs
Model name | Pixel 4 Edge TPU Latency (ms) | COCO mAP (fp32/uint8) | Outputs
----------------------------------------------------------------------------------------------------------------------------------- | :------------------: | :------: | :-----:
[ssd_mobilenet_edgetpu_coco](https://storage.cloud.google.com/mobilenet_edgetpu/checkpoints/ssdlite_mobilenet_edgetpu_coco_quant.tar.gz) | 6.6 | 24.3 | Boxes
[ssd_mobiledet_edgetpu_coco](http://download.tensorflow.org/models/object_detection/ssdlite_mobiledet_edgetpu_320x320_coco_2020_05_19.tar.gz) | 6.9 | 25.9/25.6 | Boxes
[ssd_mobilenet_edgetpu_coco](https://storage.cloud.google.com/mobilenet_edgetpu/checkpoints/ssdlite_mobilenet_edgetpu_coco_quant.tar.gz) | 6.6 | -/24.3 | Boxes
### Pixel4 DSP models
Model name | Pixel 4 DSP Latency (ms) | COCO mAP (fp32/uint8) | Outputs
----------------------------------------------------------------------------------------------------------------------------------- | :------------------: | :------: | :-----:
[ssd_mobiledet_dsp_coco](http://download.tensorflow.org/models/object_detection/ssdlite_mobiledet_dsp_320x320_coco_2020_05_19.tar.gz) | 12.3 | 28.9/28.8 | Boxes
## Kitti-trained models
......
......@@ -9,7 +9,7 @@ groundtruth boxes in the dataset. If an image is encountered with more
bounding boxes, the excess boxes will be clipped.
## Q: AttributeError: 'module' object has no attribute 'BackupHandler'
A: This BackupHandler (tf.contrib.slim.tfexample_decoder.BackupHandler) was
A: This BackupHandler (tf_slim.tfexample_decoder.BackupHandler) was
introduced in TensorFlow 1.5.0, so running with earlier versions may cause this
issue. It has now been replaced by
object_detection.data_decoders.tf_example_decoder.BackupHandler. Whoever sees
......
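
For anyone hitting the error above, a minimal sketch of the replacement (the handlers and argument names are assumed to mirror the old contrib version and are illustrative):

```python
import tf_slim as slim
from object_detection.data_decoders import tf_example_decoder

# Use the OD API's own BackupHandler instead of the slim/contrib one;
# here a primary Tensor handler falls back to a hypothetical backup key.
handler = tf_example_decoder.BackupHandler(
    handler=slim.tfexample_decoder.Tensor('image/class/label'),
    backup=slim.tfexample_decoder.Tensor('image/class/text'))
```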
......@@ -8,7 +8,8 @@ Tensorflow Object Detection API depends on the following libraries:
* Python-tk
* Pillow 1.0
* lxml
* tf Slim (which is included in the "tensorflow/models/research/" checkout)
* tf-slim (https://github.com/google-research/tf-slim)
* slim (which is included in the "tensorflow/models/research/" checkout)
* Jupyter notebook
* Matplotlib
* Tensorflow (1.15.0)
......@@ -29,23 +30,25 @@ pip install tensorflow-gpu
The remaining libraries can be installed on Ubuntu 16.04 via apt-get:
``` bash
```bash
sudo apt-get install protobuf-compiler python-pil python-lxml python-tk
pip install --user Cython
pip install --user contextlib2
pip install --user jupyter
pip install --user matplotlib
pip install --user tf_slim
```
Alternatively, users can install dependencies using pip:
``` bash
```bash
pip install --user Cython
pip install --user contextlib2
pip install --user pillow
pip install --user lxml
pip install --user jupyter
pip install --user matplotlib
pip install --user tf_slim
```
<!-- common_typos_disable -->
......@@ -161,6 +164,10 @@ to avoid running this manually, you can add it as a new line to the end of your
tensorflow/models/research on your system. After updating ~/.bashrc file you
can run the following command:
Note: Some of the functions defined in tensorflow/models/research/slim have been
moved to [tf-slim](https://github.com/google-research/tf-slim), so installing
tf_slim is now required.
```bash
source ~/.bashrc
```
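
As a quick, illustrative sanity check that the new dependency resolves:

```python
# Fails with ImportError if `pip install tf_slim` was skipped.
import tf_slim as slim
print(slim.__name__)
```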
......
......@@ -15,7 +15,7 @@
"""Utility functions for detection inference."""
from __future__ import division
import tensorflow as tf
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
......
......@@ -19,7 +19,7 @@ import os
import numpy as np
from PIL import Image
import six
import tensorflow as tf
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.core import standard_fields
......
......@@ -35,7 +35,7 @@ metrics).
"""
import itertools
import tensorflow as tf
import tensorflow.compat.v1 as tf
from object_detection.inference import detection_inference
tf.flags.DEFINE_string('input_tfrecord_paths', None,
......
......@@ -20,7 +20,7 @@ from __future__ import print_function
import functools
import tensorflow as tf
import tensorflow.compat.v1 as tf
from object_detection.builders import dataset_builder
from object_detection.builders import image_resizer_builder
from object_detection.builders import model_builder
......@@ -189,10 +189,21 @@ def transform_input_data(tensor_dict,
Returns:
A dictionary keyed by fields.InputDataFields containing the tensors obtained
after applying all the transformations.
Raises:
KeyError: If both groundtruth_labeled_classes and groundtruth_image_classes
are provided by the decoder in tensor_dict since both fields are
considered to contain the same information.
"""
out_tensor_dict = tensor_dict.copy()
labeled_classes_field = fields.InputDataFields.groundtruth_labeled_classes
image_classes_field = fields.InputDataFields.groundtruth_image_classes
if (labeled_classes_field in out_tensor_dict and
image_classes_field in out_tensor_dict):
raise KeyError('groundtruth_labeled_classes and groundtruth_image_classes '
'are provided by the decoder, but only one should be set.')
if labeled_classes_field in out_tensor_dict:
# tf_example_decoder casts unrecognized labels to -1. Remove these
# unrecognized labels before converting labeled_classes to k-hot vector.
......@@ -201,6 +212,10 @@ def transform_input_data(tensor_dict,
out_tensor_dict[labeled_classes_field] = _convert_labeled_classes_to_k_hot(
out_tensor_dict[labeled_classes_field], num_classes)
if image_classes_field in out_tensor_dict:
out_tensor_dict[labeled_classes_field] = _convert_labeled_classes_to_k_hot(
out_tensor_dict[image_classes_field], num_classes)
if fields.InputDataFields.multiclass_scores in out_tensor_dict:
out_tensor_dict[
fields.InputDataFields
......@@ -475,6 +490,9 @@ def pad_input_data_to_static_shapes(tensor_dict,
if fields.InputDataFields.context_feature_length in tensor_dict:
padding_shapes[fields.InputDataFields.context_feature_length] = []
if fields.InputDataFields.is_annotated in tensor_dict:
padding_shapes[fields.InputDataFields.is_annotated] = []
padded_tensor_dict = {}
for tensor_name in tensor_dict:
padded_tensor_dict[tensor_name] = shape_utils.pad_or_clip_nd(
......@@ -551,6 +569,7 @@ def _get_labels_dict(input_dict):
fields.InputDataFields.groundtruth_instance_masks,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_keypoint_visibilities,
fields.InputDataFields.groundtruth_keypoint_weights,
......@@ -700,6 +719,8 @@ def train_input(train_config, train_input_config,
labels[fields.InputDataFields.groundtruth_visibilities] is a
[batch_size, num_boxes, num_keypoints] bool tensor containing
groundtruth visibilities for each keypoint.
labels[fields.InputDataFields.groundtruth_labeled_classes] is a
[batch_size, num_classes] float32 k-hot tensor of classes.
Raises:
TypeError: if the `train_config`, `train_input_config` or `model_config`
......@@ -760,12 +781,14 @@ def train_input(train_config, train_input_config,
include_source_id = train_input_config.include_source_id
return (_get_features_dict(tensor_dict, include_source_id),
_get_labels_dict(tensor_dict))
reduce_to_frame_fn = get_reduce_to_frame_fn(train_input_config, True)
dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](
train_input_config,
transform_input_data_fn=transform_and_pad_input_data_fn,
batch_size=params['batch_size'] if params else train_config.batch_size,
input_context=input_context)
input_context=input_context,
reduce_to_frame_fn=reduce_to_frame_fn)
return dataset
......@@ -834,6 +857,11 @@ def eval_input(eval_config, eval_input_config, model_config,
labels[fields.InputDataFields.groundtruth_visibilities] is a
[batch_size, num_boxes, num_keypoints] bool tensor containing
groundtruth visibilities for each keypoint.
labels[fields.InputDataFields.groundtruth_group_of] is a [1, num_boxes]
bool tensor indicating if the box covers more than 5 instances of the
same class which heavily occlude each other.
labels[fields.InputDataFields.groundtruth_labeled_classes] is a
[num_boxes, num_classes] float32 k-hot tensor of classes.
Raises:
TypeError: if the `eval_config`, `eval_input_config` or `model_config`
......@@ -894,10 +922,14 @@ def eval_input(eval_config, eval_input_config, model_config,
include_source_id = eval_input_config.include_source_id
return (_get_features_dict(tensor_dict, include_source_id),
_get_labels_dict(tensor_dict))
reduce_to_frame_fn = get_reduce_to_frame_fn(eval_input_config, False)
dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](
eval_input_config,
batch_size=params['batch_size'] if params else eval_config.batch_size,
transform_input_data_fn=transform_and_pad_input_data_fn)
transform_input_data_fn=transform_and_pad_input_data_fn,
reduce_to_frame_fn=reduce_to_frame_fn)
return dataset
......@@ -953,3 +985,74 @@ def create_predict_input_fn(model_config, predict_input_config):
receiver_tensors={SERVING_FED_EXAMPLE_KEY: example})
return _predict_input_fn
def get_reduce_to_frame_fn(input_reader_config, is_training):
"""Returns a function reducing sequence tensors to single frame tensors.
If the input type is not TF_SEQUENCE_EXAMPLE, the tensors are passed through
this function unchanged. Otherwise, when in training mode, a single frame is
selected at random from the sequence example, and the tensors for that frame
are converted to single frame tensors, with all associated context features.
In evaluation mode all frames are converted to single frame tensors with
copied context tensors. After the sequence example tensors are converted into
one or many single frame tensors, the images from each frame are decoded.
Args:
input_reader_config: An input_reader_pb2.InputReader.
is_training: Whether we are in training mode.
Returns:
`reduce_to_frame_fn` for the dataset builder
"""
if input_reader_config.input_type != (
input_reader_pb2.InputType.TF_SEQUENCE_EXAMPLE):
return lambda d: d
else:
def reduce_to_frame(dataset):
"""Returns a function reducing sequence tensors to single frame tensors.
Args:
dataset: A tf dataset containing sequence tensors.
Returns:
A tf dataset containing single frame tensors.
"""
if is_training:
def get_single_frame(tensor_dict):
"""Returns a random frame from a sequence.
Picks a random frame and returns slices of sequence tensors
corresponding to the random frame. Returns non-sequence tensors
unchanged.
Args:
tensor_dict: A dictionary containing sequence tensors.
Returns:
Tensors for a single random frame within the sequence.
"""
num_frames = tf.cast(
tf.shape(tensor_dict[fields.InputDataFields.source_id])[0],
dtype=tf.int32)
frame_index = tf.random.uniform((), minval=0, maxval=num_frames,
dtype=tf.int32)
out_tensor_dict = {}
for key in tensor_dict:
if key in fields.SEQUENCE_FIELDS:
# Slice random frame from sequence tensors
out_tensor_dict[key] = tensor_dict[key][frame_index]
else:
# Copy all context tensors.
out_tensor_dict[key] = tensor_dict[key]
return out_tensor_dict
dataset = dataset.map(get_single_frame, tf.data.experimental.AUTOTUNE)
else:
dataset = dataset.map(util_ops.tile_context_tensors,
tf.data.experimental.AUTOTUNE)
dataset = dataset.unbatch()
# Decode frame here as SequenceExample tensors contain encoded images.
dataset = dataset.map(util_ops.decode_image,
tf.data.experimental.AUTOTUNE)
return dataset
return reduce_to_frame
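
For orientation, this is how the new hook is consumed (mirroring the `train_input` wiring earlier in this diff; the names come from that code, not new API):

```python
reduce_to_frame_fn = get_reduce_to_frame_fn(train_input_config,
                                            is_training=True)
# Identity for plain TF Examples; for TF_SEQUENCE_EXAMPLE inputs it samples
# one random frame per sequence in training (all frames in eval) and then
# decodes the per-frame images.
dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](
    train_input_config,
    transform_input_data_fn=transform_and_pad_input_data_fn,
    batch_size=train_config.batch_size,
    input_context=input_context,
    reduce_to_frame_fn=reduce_to_frame_fn)
```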
......@@ -24,7 +24,7 @@ from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from object_detection import inputs
from object_detection.core import preprocessor
......@@ -53,6 +53,25 @@ def _get_configs_for_model(model_name):
configs, kwargs_dict=override_dict)
def _get_configs_for_model_sequence_example(model_name):
"""Returns configurations for model."""
fname = os.path.join(tf.resource_loader.get_data_files_path(),
'test_data/' + model_name + '.config')
label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),
'data/snapshot_serengeti_label_map.pbtxt')
data_path = os.path.join(
tf.resource_loader.get_data_files_path(),
'test_data/snapshot_serengeti_sequence_examples.record')
configs = config_util.get_configs_from_pipeline_file(fname)
override_dict = {
'train_input_path': data_path,
'eval_input_path': data_path,
'label_map_path': label_map_path
}
return config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
def _make_initializable_iterator(dataset):
"""Creates an iterator, and initializes tables.
......@@ -62,7 +81,7 @@ def _make_initializable_iterator(dataset):
Returns:
A `tf.data.Iterator`.
"""
iterator = dataset.make_initializable_iterator()
iterator = tf.data.make_initializable_iterator(dataset)
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator
......@@ -205,6 +224,85 @@ class InputsTest(test_case.TestCase, parameterized.TestCase):
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_context_rcnn_resnet50_train_input_with_sequence_example(
self, train_batch_size=8):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap')
model_config = configs['model']
train_config = configs['train_config']
train_config.batch_size = train_batch_size
train_input_fn = inputs.create_train_input_fn(
train_config, configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([train_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([train_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[train_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[train_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
def test_context_rcnn_resnet50_eval_input_with_sequence_example(
self, eval_batch_size=8):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap')
model_config = configs['model']
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_ssd_inceptionV2_train_input(self):
"""Tests the training input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
......
......@@ -44,9 +44,8 @@ Example usage:
"""
import functools
import os
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
import tensorflow.compat.v1 as tf
from tensorflow.python.util.deprecation import deprecated
from object_detection.builders import dataset_builder
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
......@@ -81,7 +80,7 @@ flags.DEFINE_boolean(
FLAGS = flags.FLAGS
@contrib_framework.deprecated(None, 'Use object_detection/model_main.py.')
@deprecated(None, 'Use object_detection/model_main.py.')
def main(unused_argv):
assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
assert FLAGS.eval_dir, '`eval_dir` is missing.'
......
......@@ -19,7 +19,7 @@ DetectionModel.
"""
import logging
import tensorflow as tf
import tensorflow.compat.v1 as tf
from object_detection import eval_util
from object_detection.core import prefetcher
......
......@@ -44,8 +44,9 @@ Example usage:
import functools
import json
import os
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
import tensorflow.compat.v1 as tf
from tensorflow.python.util.deprecation import deprecated
from object_detection.builders import dataset_builder
from object_detection.builders import graph_rewriter_builder
......@@ -85,7 +86,7 @@ flags.DEFINE_string('model_config_path', '',
FLAGS = flags.FLAGS
@contrib_framework.deprecated(None, 'Use object_detection/model_main.py.')
@deprecated(None, 'Use object_detection/model_main.py.')
def main(_):
assert FLAGS.train_dir, '`train_dir` is missing.'
if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir)
......
......@@ -21,8 +21,8 @@ DetectionModel.
import functools
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.builders import optimizer_builder
from object_detection.builders import preprocessor_builder
......@@ -33,8 +33,6 @@ from object_detection.utils import ops as util_ops
from object_detection.utils import variables_helper
from deployment import model_deploy
slim = contrib_slim
def create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
batch_queue_capacity, num_batch_queue_threads,
......
......@@ -15,10 +15,9 @@
"""Tests for object_detection.trainer."""
import tensorflow as tf
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from tensorflow.contrib import layers as contrib_layers
from object_detection.core import losses
from object_detection.core import model
......@@ -90,10 +89,9 @@ class FakeDetectionModel(model.DetectionModel):
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
flattened_inputs = contrib_layers.flatten(preprocessed_inputs)
class_prediction = contrib_layers.fully_connected(flattened_inputs,
self._num_classes)
box_prediction = contrib_layers.fully_connected(flattened_inputs, 4)
flattened_inputs = slim.flatten(preprocessed_inputs)
class_prediction = slim.fully_connected(flattened_inputs, self._num_classes)
box_prediction = slim.fully_connected(flattened_inputs, 4)
return {
'class_predictions_with_background': tf.reshape(
......
......@@ -26,7 +26,7 @@ This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow as tf
import tensorflow.compat.v1 as tf
from object_detection.core import matcher
from object_detection.utils import shape_utils
......
......@@ -16,7 +16,7 @@
"""Tests for object_detection.matchers.argmax_matcher."""
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from object_detection.matchers import argmax_matcher
from object_detection.utils import test_case
......
......@@ -15,7 +15,7 @@
"""Bipartite matcher implementation."""
import tensorflow as tf
import tensorflow.compat.v1 as tf
from tensorflow.contrib.image.python.ops import image_ops
from object_detection.core import matcher
......