Unverified Commit fd7b6887 authored by Jonathan Huang, committed by GitHub

Merge pull request #3293 from pkulzc/master

Internal changes of object_detection 
parents f98ec55e 1efe98bb
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model input function for tf-learn object detection model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from object_detection.builders import dataset_builder
from object_detection.builders import image_resizer_builder
from object_detection.builders import model_builder
from object_detection.builders import preprocessor_builder
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import eval_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import train_pb2
from object_detection.utils import config_util
from object_detection.utils import dataset_util
from object_detection.utils import ops as util_ops
HASH_KEY = 'hash'
HASH_BINS = 1 << 31
SERVING_FED_EXAMPLE_KEY = 'serialized_example'
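# Note: HASH_BINS is 2**31, so the ids produced by
# tf.string_to_hash_bucket_fast below fit in the non-negative range of int32
# once cast.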
def transform_input_data(tensor_dict,
model_preprocess_fn,
image_resizer_fn,
num_classes,
data_augmentation_fn=None,
merge_multiple_boxes=False,
retain_original_image=False):
"""A single function that is responsible for all input data transformations.
Data transformation functions are applied in the following order.
1. data_augmentation_fn (optional): applied on tensor_dict.
2. model_preprocess_fn: applied only on image tensor in tensor_dict.
3. image_resizer_fn: applied only on instance mask tensor in tensor_dict.
4. one_hot_encoding: applied to classes tensor in tensor_dict.
5. merge_multiple_boxes (optional): when groundtruth boxes are exactly the
same they can be merged into a single box with an associated k-hot class
label.
Args:
tensor_dict: dictionary containing input tensors keyed by
fields.InputDataFields.
    model_preprocess_fn: model's preprocess function to apply on image tensor.
      This function must take in a 4-D float tensor and return a 4-D
      preprocessed float tensor and a tensor containing the true image shape.
    image_resizer_fn: image resizer function to apply on groundtruth instance
      masks. This function must take a 4-D float image tensor and a 4-D tensor
      of instance masks and return the resized versions of these along with
      the true shapes.
    num_classes: maximum number of classes to one-hot (or k-hot) encode the
      class labels.
data_augmentation_fn: (optional) data augmentation function to apply on
input `tensor_dict`.
merge_multiple_boxes: (optional) whether to merge multiple groundtruth boxes
and classes for a given image if the boxes are exactly the same.
retain_original_image: (optional) whether to retain original image in the
output dictionary.
Returns:
A dictionary keyed by fields.InputDataFields containing the tensors obtained
after applying all the transformations.
"""
if retain_original_image:
tensor_dict[fields.InputDataFields.
original_image] = tensor_dict[fields.InputDataFields.image]
# Apply data augmentation ops.
if data_augmentation_fn is not None:
tensor_dict = data_augmentation_fn(tensor_dict)
# Apply model preprocessing ops and resize instance masks.
image = tf.expand_dims(
tf.to_float(tensor_dict[fields.InputDataFields.image]), axis=0)
preprocessed_resized_image, true_image_shape = model_preprocess_fn(image)
tensor_dict[fields.InputDataFields.image] = tf.squeeze(
preprocessed_resized_image, axis=0)
tensor_dict[fields.InputDataFields.true_image_shape] = tf.squeeze(
true_image_shape, axis=0)
if fields.InputDataFields.groundtruth_instance_masks in tensor_dict:
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
_, resized_masks, _ = image_resizer_fn(image, masks)
tensor_dict[fields.InputDataFields.
groundtruth_instance_masks] = resized_masks
# Transform groundtruth classes to one hot encodings.
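  # Class labels coming out of the decoder are 1-based; shifting them down by
  # label_offset makes them 0-indexed before one-hot encoding.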
label_offset = 1
zero_indexed_groundtruth_classes = tensor_dict[
fields.InputDataFields.groundtruth_classes] - label_offset
tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
zero_indexed_groundtruth_classes, num_classes)
if merge_multiple_boxes:
merged_boxes, merged_classes, _ = util_ops.merge_boxes_with_multiple_labels(
tensor_dict[fields.InputDataFields.groundtruth_boxes],
zero_indexed_groundtruth_classes, num_classes)
tensor_dict[fields.InputDataFields.groundtruth_boxes] = merged_boxes
tensor_dict[fields.InputDataFields.groundtruth_classes] = merged_classes
return tensor_dict
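# Illustrative usage sketch (an assumption-laden example, not part of the
# original module): wiring transform_input_data with stand-in functions.
# `fake_preprocess_fn` and `fake_resizer_fn` are hypothetical placeholders for
# model.preprocess and an image_resizer_builder product; see the tests below
# for real usage.
#
#   def fake_preprocess_fn(image):
#     # Identity preprocess: return the image and its true shape.
#     return image, tf.expand_dims(tf.shape(image)[1:], axis=0)
#
#   def fake_resizer_fn(image, masks):
#     # Identity resize for the image and instance masks.
#     return image, masks, tf.shape(image)
#
#   transform_fn = functools.partial(
#       transform_input_data,
#       model_preprocess_fn=fake_preprocess_fn,
#       image_resizer_fn=fake_resizer_fn,
#       num_classes=3)
#   transformed_dict = transform_fn(tensor_dict=decoded_tensor_dict)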
def augment_input_data(tensor_dict, data_augmentation_options):
"""Applies data augmentation ops to input tensors.
Args:
tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields.
data_augmentation_options: A list of tuples, where each tuple contains a
function and a dictionary that contains arguments and their values.
Usually, this is the output of core/preprocessor.build.
Returns:
A dictionary of tensors obtained by applying data augmentation ops to the
input tensor dictionary.
"""
tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
tf.to_float(tensor_dict[fields.InputDataFields.image]), 0)
include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
in tensor_dict)
include_keypoints = (fields.InputDataFields.groundtruth_keypoints
in tensor_dict)
tensor_dict = preprocessor.preprocess(
tensor_dict, data_augmentation_options,
func_arg_map=preprocessor.get_default_func_arg_map(
include_instance_masks=include_instance_masks,
include_keypoints=include_keypoints))
tensor_dict[fields.InputDataFields.image] = tf.squeeze(
tensor_dict[fields.InputDataFields.image], axis=0)
return tensor_dict
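# Illustrative sketch (not part of the original module): building a minimal
# augmentation pipeline by hand. preprocessor_builder.build normally produces
# these (function, kwargs) tuples from the train config.
#
#   data_augmentation_options = [
#       (preprocessor.random_horizontal_flip, {}),
#   ]
#   augment_fn = functools.partial(
#       augment_input_data,
#       data_augmentation_options=data_augmentation_options)
#   augmented_dict = augment_fn(tensor_dict=tensor_dict)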
def create_train_input_fn(train_config, train_input_config,
model_config):
"""Creates a train `input` function for `Estimator`.
Args:
train_config: A train_pb2.TrainConfig.
train_input_config: An input_reader_pb2.InputReader.
model_config: A model_pb2.DetectionModel.
Returns:
`input_fn` for `Estimator` in TRAIN mode.
"""
def _train_input_fn(params=None):
"""Returns `features` and `labels` tensor dictionaries for training.
Args:
params: Parameter dictionary passed from the estimator.
Returns:
features: Dictionary of feature tensors.
features[fields.InputDataFields.image] is a [batch_size, H, W, C]
float32 tensor with preprocessed images.
features[HASH_KEY] is a [batch_size] int32 tensor representing unique
identifiers for the images.
features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]
int32 tensor representing the true image shapes, as preprocessed
images could be padded.
labels: Dictionary of groundtruth tensors.
labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size]
int32 tensor indicating the number of groundtruth boxes.
labels[fields.InputDataFields.groundtruth_boxes] is a
[batch_size, num_boxes, 4] float32 tensor containing the corners of
the groundtruth boxes.
labels[fields.InputDataFields.groundtruth_classes] is a
[batch_size, num_boxes, num_classes] float32 one-hot tensor of
classes.
labels[fields.InputDataFields.groundtruth_weights] is a
[batch_size, num_boxes] float32 tensor containing groundtruth weights
for the boxes.
-- Optional --
labels[fields.InputDataFields.groundtruth_instance_masks] is a
[batch_size, num_boxes, H, W] float32 tensor containing only binary
values, which represent instance masks for objects.
labels[fields.InputDataFields.groundtruth_keypoints] is a
[batch_size, num_boxes, num_keypoints, 2] float32 tensor containing
keypoints for each box.
Raises:
TypeError: if the `train_config` or `train_input_config` are not of the
correct type.
"""
if not isinstance(train_config, train_pb2.TrainConfig):
raise TypeError('For training mode, the `train_config` must be a '
'train_pb2.TrainConfig.')
if not isinstance(train_input_config, input_reader_pb2.InputReader):
      raise TypeError('The `train_input_config` must be an '
                      'input_reader_pb2.InputReader.')
if not isinstance(model_config, model_pb2.DetectionModel):
raise TypeError('The `model_config` must be a '
'model_pb2.DetectionModel.')
data_augmentation_options = [
preprocessor_builder.build(step)
for step in train_config.data_augmentation_options
]
data_augmentation_fn = functools.partial(
augment_input_data, data_augmentation_options=data_augmentation_options)
model = model_builder.build(model_config, is_training=True)
image_resizer_config = config_util.get_image_resizer_config(model_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
transform_data_fn = functools.partial(
transform_input_data, model_preprocess_fn=model.preprocess,
image_resizer_fn=image_resizer_fn,
num_classes=config_util.get_number_of_classes(model_config),
data_augmentation_fn=data_augmentation_fn)
dataset = dataset_builder.build(
train_input_config,
transform_input_data_fn=transform_data_fn,
batch_size=params['batch_size'] if params else train_config.batch_size,
max_num_boxes=train_config.max_number_of_boxes,
num_classes=config_util.get_number_of_classes(model_config),
spatial_image_shape=config_util.get_spatial_image_size(
image_resizer_config))
tensor_dict = dataset_util.make_initializable_iterator(dataset).get_next()
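    # Hash each string source_id into one of HASH_BINS buckets so every image
    # gets a compact integer identifier.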
hash_from_source_id = tf.string_to_hash_bucket_fast(
tensor_dict[fields.InputDataFields.source_id], HASH_BINS)
features = {
fields.InputDataFields.image: tensor_dict[fields.InputDataFields.image],
HASH_KEY: tf.cast(hash_from_source_id, tf.int32),
fields.InputDataFields.true_image_shape: tensor_dict[
fields.InputDataFields.true_image_shape]
}
labels = {
fields.InputDataFields.num_groundtruth_boxes: tensor_dict[
fields.InputDataFields.num_groundtruth_boxes],
fields.InputDataFields.groundtruth_boxes: tensor_dict[
fields.InputDataFields.groundtruth_boxes],
fields.InputDataFields.groundtruth_classes: tensor_dict[
fields.InputDataFields.groundtruth_classes],
fields.InputDataFields.groundtruth_weights: tensor_dict[
fields.InputDataFields.groundtruth_weights]
}
if fields.InputDataFields.groundtruth_keypoints in tensor_dict:
labels[fields.InputDataFields.groundtruth_keypoints] = tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
if fields.InputDataFields.groundtruth_instance_masks in tensor_dict:
labels[fields.InputDataFields.groundtruth_instance_masks] = tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
return features, labels
return _train_input_fn
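# Illustrative sketch (not part of the original module): typical wiring of the
# train input function from a parsed pipeline config. `pipeline_config_path`,
# `estimator` and `num_steps` are assumed to exist.
#
#   configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
#   train_input_fn = create_train_input_fn(
#       train_config=configs['train_config'],
#       train_input_config=configs['train_input_config'],
#       model_config=configs['model'])
#   estimator.train(input_fn=train_input_fn, max_steps=num_steps)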
def create_eval_input_fn(eval_config, eval_input_config, model_config):
"""Creates an eval `input` function for `Estimator`.
Args:
eval_config: An eval_pb2.EvalConfig.
eval_input_config: An input_reader_pb2.InputReader.
model_config: A model_pb2.DetectionModel.
Returns:
`input_fn` for `Estimator` in EVAL mode.
"""
def _eval_input_fn(params=None):
"""Returns `features` and `labels` tensor dictionaries for evaluation.
Args:
params: Parameter dictionary passed from the estimator.
Returns:
features: Dictionary of feature tensors.
features[fields.InputDataFields.image] is a [1, H, W, C] float32 tensor
with preprocessed images.
features[HASH_KEY] is a [1] int32 tensor representing unique
identifiers for the images.
features[fields.InputDataFields.true_image_shape] is a [1, 3]
int32 tensor representing the true image shapes, as preprocessed
images could be padded.
features[fields.InputDataFields.original_image] is a [1, H', W', C]
float32 tensor with the original image.
labels: Dictionary of groundtruth tensors.
labels[fields.InputDataFields.groundtruth_boxes] is a [1, num_boxes, 4]
float32 tensor containing the corners of the groundtruth boxes.
labels[fields.InputDataFields.groundtruth_classes] is a
[num_boxes, num_classes] float32 one-hot tensor of classes.
labels[fields.InputDataFields.groundtruth_area] is a [1, num_boxes]
float32 tensor containing object areas.
labels[fields.InputDataFields.groundtruth_is_crowd] is a [1, num_boxes]
bool tensor indicating if the boxes enclose a crowd.
labels[fields.InputDataFields.groundtruth_difficult] is a [1, num_boxes]
int32 tensor indicating if the boxes represent difficult instances.
-- Optional --
labels[fields.InputDataFields.groundtruth_instance_masks] is a
[1, num_boxes, H, W] float32 tensor containing only binary values,
which represent instance masks for objects.
Raises:
TypeError: if the `eval_config` or `eval_input_config` are not of the
correct type.
"""
del params
if not isinstance(eval_config, eval_pb2.EvalConfig):
      raise TypeError('For eval mode, the `eval_config` must be an '
                      'eval_pb2.EvalConfig.')
    if not isinstance(eval_input_config, input_reader_pb2.InputReader):
      raise TypeError('The `eval_input_config` must be an '
                      'input_reader_pb2.InputReader.')
if not isinstance(model_config, model_pb2.DetectionModel):
raise TypeError('The `model_config` must be a '
'model_pb2.DetectionModel.')
num_classes = config_util.get_number_of_classes(model_config)
model = model_builder.build(model_config, is_training=False)
image_resizer_config = config_util.get_image_resizer_config(model_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
transform_data_fn = functools.partial(
transform_input_data, model_preprocess_fn=model.preprocess,
image_resizer_fn=image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=None,
retain_original_image=True)
dataset = dataset_builder.build(eval_input_config,
transform_input_data_fn=transform_data_fn)
input_dict = dataset_util.make_initializable_iterator(dataset).get_next()
hash_from_source_id = tf.string_to_hash_bucket_fast(
input_dict[fields.InputDataFields.source_id], HASH_BINS)
features = {
fields.InputDataFields.image:
input_dict[fields.InputDataFields.image],
fields.InputDataFields.original_image:
input_dict[fields.InputDataFields.original_image],
HASH_KEY: tf.cast(hash_from_source_id, tf.int32),
fields.InputDataFields.true_image_shape:
input_dict[fields.InputDataFields.true_image_shape]
}
labels = {
fields.InputDataFields.groundtruth_boxes:
input_dict[fields.InputDataFields.groundtruth_boxes],
fields.InputDataFields.groundtruth_classes:
input_dict[fields.InputDataFields.groundtruth_classes],
fields.InputDataFields.groundtruth_area:
input_dict[fields.InputDataFields.groundtruth_area],
fields.InputDataFields.groundtruth_is_crowd:
input_dict[fields.InputDataFields.groundtruth_is_crowd],
fields.InputDataFields.groundtruth_difficult:
tf.cast(input_dict[fields.InputDataFields.groundtruth_difficult],
tf.int32)
}
if fields.InputDataFields.groundtruth_instance_masks in input_dict:
labels[fields.InputDataFields.groundtruth_instance_masks] = input_dict[
fields.InputDataFields.groundtruth_instance_masks]
# Add a batch dimension to the tensors.
    features = {
        key: tf.expand_dims(feature, axis=0)
        for key, feature in features.items()
    }
    labels = {
        key: tf.expand_dims(label, axis=0)
        for key, label in labels.items()
    }
return features, labels
return _eval_input_fn
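# Illustrative sketch (not part of the original module): the eval input
# function is wired the same way as the train one and handed to
# Estimator.evaluate; `configs` and `estimator` are assumed to exist.
#
#   eval_input_fn = create_eval_input_fn(
#       eval_config=configs['eval_config'],
#       eval_input_config=configs['eval_input_config'],
#       model_config=configs['model'])
#   metrics = estimator.evaluate(input_fn=eval_input_fn)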
def create_predict_input_fn(model_config):
"""Creates a predict `input` function for `Estimator`.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
`input_fn` for `Estimator` in PREDICT mode.
"""
def _predict_input_fn(params=None):
"""Decodes serialized tf.Examples and returns `ServingInputReceiver`.
Args:
params: Parameter dictionary passed from the estimator.
Returns:
`ServingInputReceiver`.
"""
del params
example = tf.placeholder(dtype=tf.string, shape=[], name='input_feature')
num_classes = config_util.get_number_of_classes(model_config)
model = model_builder.build(model_config, is_training=False)
image_resizer_config = config_util.get_image_resizer_config(model_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
transform_fn = functools.partial(
transform_input_data, model_preprocess_fn=model.preprocess,
image_resizer_fn=image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=None)
decoder = tf_example_decoder.TfExampleDecoder(load_instance_masks=False)
input_dict = transform_fn(decoder.decode(example))
images = tf.to_float(input_dict[fields.InputDataFields.image])
images = tf.expand_dims(images, axis=0)
return tf.estimator.export.ServingInputReceiver(
features={fields.InputDataFields.image: images},
receiver_tensors={SERVING_FED_EXAMPLE_KEY: example})
return _predict_input_fn
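# Illustrative sketch (not part of the original module): exporting a
# SavedModel for serving with the predict input function. `estimator` and
# `export_dir` are assumed to exist.
#
#   predict_input_fn = create_predict_input_fn(model_config=configs['model'])
#   estimator.export_savedmodel(export_dir, predict_input_fn)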
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.tflearn.inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import numpy as np
import tensorflow as tf
from object_detection import inputs
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
FLAGS = tf.flags.FLAGS
def _get_configs_for_model(model_name):
"""Returns configurations for model."""
# TODO: Make sure these tests work fine outside google3.
fname = os.path.join(
FLAGS.test_srcdir,
('google3/third_party/tensorflow_models/'
'object_detection/samples/configs/' + model_name + '.config'))
label_map_path = os.path.join(FLAGS.test_srcdir,
('google3/third_party/tensorflow_models/'
'object_detection/data/pet_label_map.pbtxt'))
data_path = os.path.join(FLAGS.test_srcdir,
('google3/third_party/tensorflow_models/'
'object_detection/test_data/pets_examples.record'))
configs = config_util.get_configs_from_pipeline_file(fname)
return config_util.merge_external_params_with_configs(
configs,
train_input_path=data_path,
eval_input_path=data_path,
label_map_path=label_map_path)
class InputsTest(tf.test.TestCase):
def test_faster_rcnn_resnet50_train_input(self):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
configs['train_config'].unpad_groundtruth_tensors = True
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = train_input_fn()
self.assertAllEqual([None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[None, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[None, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[None],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_faster_rcnn_resnet50_eval_input(self):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
configs['eval_config'], configs['eval_input_config'], model_config)
features, labels = eval_input_fn()
self.assertAllEqual([1, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[1, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([1], features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, None, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, None, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_ssd_inceptionV2_train_input(self):
"""Tests the training input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
batch_size = configs['train_config'].batch_size
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = train_input_fn()
self.assertAllEqual([batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[batch_size],
labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.num_groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 50, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 50, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[batch_size, 50],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_ssd_inceptionV2_eval_input(self):
"""Tests the eval input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
configs['eval_config'], configs['eval_input_config'], model_config)
features, labels = eval_input_fn()
self.assertAllEqual([1, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[1, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([1], features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, None, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, None, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_predict_input(self):
"""Tests the predict input function."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
self.assertEqual([1, 300, 300, 3], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_error_with_bad_train_config(self):
"""Tests that a TypeError is raised with improper train config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['eval_config'], # Expecting `TrainConfig`.
train_input_config=configs['train_input_config'],
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_input_config(self):
"""Tests that a TypeError is raised with improper train input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_model_config(self):
"""Tests that a TypeError is raised with improper train model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['train_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_eval_config(self):
"""Tests that a TypeError is raised with improper eval config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['train_config'], # Expecting `EvalConfig`.
eval_input_config=configs['eval_input_config'],
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_input_config(self):
"""Tests that a TypeError is raised with improper eval input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_model_config(self):
"""Tests that a TypeError is raised with improper eval model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['eval_input_config'],
model_config=configs['eval_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
eval_input_fn()
class DataAugmentationFnTest(tf.test.TestCase):
def test_apply_image_and_box_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],
[[10, 10, 20, 20]]
)
def test_include_masks_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
})
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.zeros([2, 10, 10], np.uint8))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3])
self.assertAllEqual(augmented_tensor_dict_out[
fields.InputDataFields.groundtruth_instance_masks].shape, [2, 20, 20])
def test_include_keypoints_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],
[[10, 10, 20, 20]]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_keypoints],
[[[10, 20], [10, 10]]]
)
def _fake_model_preprocessor_fn(image):
return (image, tf.expand_dims(tf.shape(image)[1:], axis=0))
def _fake_image_resizer_fn(image, mask):
return (image, mask, tf.shape(image))
class DataTransformationFnTest(tf.test.TestCase):
def test_returns_correct_class_label_encodings(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_classes],
[[0, 0, 1], [1, 0, 0]])
def test_returns_correct_merged_boxes(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
merge_multiple_boxes=True)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_boxes],
[[.5, .5, 1., 1.]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_classes],
[[1, 0, 1]])
def test_returns_resized_masks(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.random.rand(2, 4, 4).astype(np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def fake_image_resizer_fn(image, masks):
resized_image = tf.image.resize_images(image, [8, 8])
resized_masks = tf.transpose(
tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]),
[2, 0, 1])
return resized_image, resized_masks, tf.shape(resized_image)
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(transformed_inputs[
fields.InputDataFields.groundtruth_instance_masks].shape, [2, 8, 8])
def test_applies_model_preprocess_fn_to_image_tensor(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def fake_model_preprocessor_fn(image):
return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0))
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(transformed_inputs[fields.InputDataFields.image],
np_image / 255.)
self.assertAllClose(transformed_inputs[fields.InputDataFields.
true_image_shape],
[4, 4, 3])
def test_applies_data_augmentation_fn_to_tensor_dict(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def add_one_data_augmentation_fn(tensor_dict):
return {key: value + 1 for key, value in tensor_dict.items()}
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_one_data_augmentation_fn)
with self.test_session() as sess:
augmented_tensor_dict = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],
np_image + 1)
self.assertAllEqual(
augmented_tensor_dict[fields.InputDataFields.groundtruth_classes],
[[0, 0, 0, 1], [0, 1, 0, 0]])
def test_applies_data_augmentation_fn_before_model_preprocess_fn(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def mul_two_model_preprocessor_fn(image):
return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0))
def add_five_to_image_data_augmentation_fn(tensor_dict):
tensor_dict[fields.InputDataFields.image] += 5
return tensor_dict
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=mul_two_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_five_to_image_data_augmentation_fn)
with self.test_session() as sess:
augmented_tensor_dict = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],
(np_image + 5) * 2)
if __name__ == '__main__':
tf.test.main()
......
@@ -14,7 +14,8 @@ py_library(
     ],
     deps = [
         "//tensorflow",
-        "//tensorflow_models/object_detection/core:matcher",
+        "//tensorflow/models/research/object_detection/core:matcher",
+        "//tensorflow/models/research/object_detection/utils:shape_utils",
     ],
 )
......
@@ -24,6 +25,7 @@ py_test(
     deps = [
         ":argmax_matcher",
         "//tensorflow",
+        "//tensorflow/models/research/object_detection/utils:test_case",
     ],
 )
......
@@ -35,7 +37,7 @@ py_library(
     deps = [
         "//tensorflow",
         "//tensorflow/contrib/image:image_py",
-        "//tensorflow_models/object_detection/core:matcher",
+        "//tensorflow/models/research/object_detection/core:matcher",
     ],
 )
......
@@ -26,10 +26,10 @@ This matcher is used in Fast(er)-RCNN.
 Note: matchers are used in TargetAssigners. There is a create_target_assigner
 factory function for popular implementations.
 """
 import tensorflow as tf
 
 from object_detection.core import matcher
+from object_detection.utils import shape_utils
 
 class ArgMaxMatcher(matcher.Matcher):
......
@@ -55,7 +55,8 @@ class ArgMaxMatcher(matcher.Matcher):
                matched_threshold,
                unmatched_threshold=None,
                negatives_lower_than_unmatched=True,
-               force_match_for_each_row=False):
+               force_match_for_each_row=False,
+               use_matmul_gather=False):
     """Construct ArgMaxMatcher.
 
     Args:
......
@@ -74,11 +75,15 @@ class ArgMaxMatcher(matcher.Matcher):
         at least one column (which is not guaranteed otherwise if the
         matched_threshold is high). Defaults to False. See
         argmax_matcher_test.testMatcherForceMatch() for an example.
+      use_matmul_gather: Force constructed match objects to use matrix
+        multiplication based gather instead of standard tf.gather.
+        (Default: False).
 
     Raises:
       ValueError: if unmatched_threshold is set but matched_threshold is not set
        or if unmatched_threshold > matched_threshold.
     """
+    super(ArgMaxMatcher, self).__init__(use_matmul_gather=use_matmul_gather)
     if (matched_threshold is None) and (unmatched_threshold is not None):
       raise ValueError('Need to also define matched_threshold when'
                        'unmatched_threshold is defined')
......
@@ -119,7 +124,9 @@
       Returns:
         matches: int32 tensor indicating the row each column matches to.
       """
-      return -1 * tf.ones([tf.shape(similarity_matrix)[1]], dtype=tf.int32)
+      similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
+          similarity_matrix)
+      return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
 
     def _match_when_rows_are_non_empty():
       """Performs matching when the rows of similarity matrix are non empty.
......
@@ -128,7 +135,7 @@
       matches: int32 tensor indicating the row each column matches to.
       """
       # Matches for each column
-      matches = tf.argmax(similarity_matrix, 0)
+      matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)
 
       # Deal with matched and unmatched threshold
       if self._matched_threshold is not None:
......
@@ -156,20 +163,28 @@
                                              -1)
 
       if self._force_match_for_each_row:
-        forced_matches_ids = tf.cast(tf.argmax(similarity_matrix, 1), tf.int32)
-        # Set matches[forced_matches_ids] = [0, ..., R], R is number of rows.
-        row_range = tf.range(tf.shape(similarity_matrix)[0])
-        col_range = tf.range(tf.shape(similarity_matrix)[1])
-        forced_matches_values = tf.cast(row_range, matches.dtype)
-        keep_matches_ids, _ = tf.setdiff1d(col_range, forced_matches_ids)
-        keep_matches_values = tf.gather(matches, keep_matches_ids)
-        matches = tf.dynamic_stitch(
-            [forced_matches_ids,
-             keep_matches_ids], [forced_matches_values, keep_matches_values])
-        return tf.cast(matches, tf.int32)
+        similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
+            similarity_matrix)
+        force_match_column_ids = tf.argmax(similarity_matrix, 1,
+                                           output_type=tf.int32)
+        force_match_column_indicators = tf.one_hot(
+            force_match_column_ids, depth=similarity_matrix_shape[1])
+        force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
+                                        output_type=tf.int32)
+        force_match_column_mask = tf.cast(
+            tf.reduce_max(force_match_column_indicators, 0), tf.bool)
+        final_matches = tf.where(force_match_column_mask,
+                                 force_match_row_ids, matches)
+        return final_matches
       else:
         return matches
 
     if similarity_matrix.shape.is_fully_defined():
       if similarity_matrix.shape[0].value == 0:
         return _match_when_rows_are_empty()
       else:
         return _match_when_rows_are_non_empty()
     else:
       return tf.cond(
           tf.greater(tf.shape(similarity_matrix)[0], 0),
           _match_when_rows_are_non_empty, _match_when_rows_are_empty)
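# Worked example of the one-hot force-match construction above (an
# illustrative annotation using a hypothetical 2x3 similarity matrix): if row
# 0 prefers column 2 and row 1 prefers column 0, then force_match_column_ids
# = [2, 0] and force_match_column_indicators = [[0, 0, 1], [1, 0, 0]]. The
# column-wise argmax gives force_match_row_ids = [1, 0, 0], the column-wise
# max gives the mask [True, False, True], and tf.where overwrites exactly the
# forced columns of `matches`, replacing the earlier tf.dynamic_stitch
# construction.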
......
@@ -19,177 +19,168 @@
 import numpy as np
 import tensorflow as tf
 
 from object_detection.matchers import argmax_matcher
+from object_detection.utils import test_case
 
-class ArgMaxMatcherTest(tf.test.TestCase):
+class ArgMaxMatcherTest(test_case.TestCase):
 
   def test_return_correct_matches_with_default_thresholds(self):
-    similarity = np.array([[1., 1, 1, 3, 1],
-                           [2, -1, 2, 0, 4],
-                           [3, 0, -1, 0, 0]])
-    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
-    expected_matched_rows = np.array([2, 0, 1, 0, 1])
-    sim = tf.constant(similarity)
-    match = matcher.match(sim)
-    matched_cols = match.matched_column_indices()
-    matched_rows = match.matched_row_indices()
-    unmatched_cols = match.unmatched_column_indices()
-    with self.test_session() as sess:
-      res_matched_cols = sess.run(matched_cols)
-      res_matched_rows = sess.run(matched_rows)
-      res_unmatched_cols = sess.run(unmatched_cols)
-    self.assertAllEqual(res_matched_rows, expected_matched_rows)
-    self.assertAllEqual(res_matched_cols, np.arange(similarity.shape[1]))
-    self.assertEmpty(res_unmatched_cols)
+
+    def graph_fn(similarity_matrix):
+      matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
+      match = matcher.match(similarity_matrix)
+      matched_cols = match.matched_column_indicator()
+      unmatched_cols = match.unmatched_column_indicator()
+      match_results = match.match_results
+      return (matched_cols, unmatched_cols, match_results)
+
+    similarity = np.array([[1., 1, 1, 3, 1],
+                           [2, -1, 2, 0, 4],
+                           [3, 0, -1, 0, 0]], dtype=np.float32)
+    expected_matched_rows = np.array([2, 0, 1, 0, 1])
+    (res_matched_cols, res_unmatched_cols,
+     res_match_results) = self.execute(graph_fn, [similarity])
+    self.assertAllEqual(res_match_results[res_matched_cols],
+                        expected_matched_rows)
+    self.assertAllEqual(np.nonzero(res_matched_cols)[0], [0, 1, 2, 3, 4])
+    self.assertFalse(np.all(res_unmatched_cols))
 
   def test_return_correct_matches_with_empty_rows(self):
-    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
-    sim = 0.2*tf.ones([0, 5])
-    match = matcher.match(sim)
-    unmatched_cols = match.unmatched_column_indices()
-    with self.test_session() as sess:
-      res_unmatched_cols = sess.run(unmatched_cols)
-    self.assertAllEqual(res_unmatched_cols, np.arange(5))
+
+    def graph_fn(similarity_matrix):
+      matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
+      match = matcher.match(similarity_matrix)
+      return match.unmatched_column_indicator()
+
+    similarity = 0.2 * np.ones([0, 5], dtype=np.float32)
+    res_unmatched_cols = self.execute(graph_fn, [similarity])
+    self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], np.arange(5))
 
   def test_return_correct_matches_with_matched_threshold(self):
-    similarity = np.array([[1, 1, 1, 3, 1],
-                           [2, -1, 2, 0, 4],
-                           [3, 0, -1, 0, 0]], dtype=np.int32)
-    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3)
-    expected_matched_cols = np.array([0, 3, 4])
-    expected_matched_rows = np.array([2, 0, 1])
-    expected_unmatched_cols = np.array([1, 2])
-    sim = tf.constant(similarity)
-    match = matcher.match(sim)
-    matched_cols = match.matched_column_indices()
-    matched_rows = match.matched_row_indices()
-    unmatched_cols = match.unmatched_column_indices()
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      res_matched_cols = sess.run(matched_cols)
-      res_matched_rows = sess.run(matched_rows)
-      res_unmatched_cols = sess.run(unmatched_cols)
-    self.assertAllEqual(res_matched_rows, expected_matched_rows)
-    self.assertAllEqual(res_matched_cols, expected_matched_cols)
-    self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)
+
+    def graph_fn(similarity):
+      matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.)
+      match = matcher.match(similarity)
+      matched_cols = match.matched_column_indicator()
+      unmatched_cols = match.unmatched_column_indicator()
+      match_results = match.match_results
+      return (matched_cols, unmatched_cols, match_results)
+
+    similarity = np.array([[1, 1, 1, 3, 1],
+                           [2, -1, 2, 0, 4],
+                           [3, 0, -1, 0, 0]], dtype=np.float32)
+    expected_matched_cols = np.array([0, 3, 4])
+    expected_matched_rows = np.array([2, 0, 1])
+    expected_unmatched_cols = np.array([1, 2])
+    (res_matched_cols, res_unmatched_cols,
+     match_results) = self.execute(graph_fn, [similarity])
+    self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
+    self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
+    self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
+                        expected_unmatched_cols)
 
   def test_return_correct_matches_with_matched_and_unmatched_threshold(self):
-    similarity = np.array([[1, 1, 1, 3, 1],
-                           [2, -1, 2, 0, 4],
-                           [3, 0, -1, 0, 0]], dtype=np.int32)
-    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
-                                           unmatched_threshold=2)
-    expected_matched_cols = np.array([0, 3, 4])
-    expected_matched_rows = np.array([2, 0, 1])
-    expected_unmatched_cols = np.array([1])  # col 2 has too high maximum val
-    sim = tf.constant(similarity)
-    match = matcher.match(sim)
-    matched_cols = match.matched_column_indices()
-    matched_rows = match.matched_row_indices()
-    unmatched_cols = match.unmatched_column_indices()
-    with self.test_session() as sess:
-      res_matched_cols = sess.run(matched_cols)
-      res_matched_rows = sess.run(matched_rows)
-      res_unmatched_cols = sess.run(unmatched_cols)
-    self.assertAllEqual(res_matched_rows, expected_matched_rows)
-    self.assertAllEqual(res_matched_cols, expected_matched_cols)
-    self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)
+
+    def graph_fn(similarity):
+      matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
+                                             unmatched_threshold=2.)
+      match = matcher.match(similarity)
+      matched_cols = match.matched_column_indicator()
+      unmatched_cols = match.unmatched_column_indicator()
+      match_results = match.match_results
+      return (matched_cols, unmatched_cols, match_results)
+
+    similarity = np.array([[1, 1, 1, 3, 1],
+                           [2, -1, 2, 0, 4],
+                           [3, 0, -1, 0, 0]], dtype=np.float32)
+    expected_matched_cols = np.array([0, 3, 4])
+    expected_matched_rows = np.array([2, 0, 1])
+    expected_unmatched_cols = np.array([1])  # col 2 has too high maximum val
+    (res_matched_cols, res_unmatched_cols,
+     match_results) = self.execute(graph_fn, [similarity])
+    self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
+    self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
+    self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
+                        expected_unmatched_cols)
 
   def test_return_correct_matches_negatives_lower_than_unmatched_false(self):
-    similarity = np.array([[1, 1, 1, 3, 1],
-                           [2, -1, 2, 0, 4],
-                           [3, 0, -1, 0, 0]], dtype=np.int32)
-    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
-                                           unmatched_threshold=2,
-                                           negatives_lower_than_unmatched=False)
-    expected_matched_cols = np.array([0, 3, 4])
-    expected_matched_rows = np.array([2, 0, 1])
-    expected_unmatched_cols = np.array([2])  # col 1 has too low maximum val
-    sim = tf.constant(similarity)
-    match = matcher.match(sim)
-    matched_cols = match.matched_column_indices()
-    matched_rows = match.matched_row_indices()
-    unmatched_cols = match.unmatched_column_indices()
-    with self.test_session() as sess:
-      res_matched_cols = sess.run(matched_cols)
-      res_matched_rows = sess.run(matched_rows)
-      res_unmatched_cols = sess.run(unmatched_cols)
-    self.assertAllEqual(res_matched_rows, expected_matched_rows)
-    self.assertAllEqual(res_matched_cols, expected_matched_cols)
-    self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)
+
+    def graph_fn(similarity):
+      matcher = argmax_matcher.ArgMaxMatcher(
+          matched_threshold=3.,
+          unmatched_threshold=2.,
+          negatives_lower_than_unmatched=False)
+      match = matcher.match(similarity)
+      matched_cols = match.matched_column_indicator()
+      unmatched_cols = match.unmatched_column_indicator()
+      match_results = match.match_results
+      return (matched_cols, unmatched_cols, match_results)
+
+    similarity = np.array([[1, 1, 1, 3, 1],
+                           [2, -1, 2, 0, 4],
+                           [3, 0, -1, 0, 0]], dtype=np.float32)
+    expected_matched_cols = np.array([0, 3, 4])
+    expected_matched_rows = np.array([2, 0, 1])
+    expected_unmatched_cols = np.array([2])  # col 1 has too low maximum val
+    (res_matched_cols, res_unmatched_cols,
+     match_results) = self.execute(graph_fn, [similarity])
+    self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
+    self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
+    self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
+                        expected_unmatched_cols)
 
   def test_return_correct_matches_unmatched_row_not_using_force_match(self):
-    similarity = np.array([[1, 1, 1, 3, 1],
-                           [-1, 0, -2, -2, -1],
-                           [3, 0, -1, 2, 0]], dtype=np.int32)
-    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
-                                           unmatched_threshold=2)
-    expected_matched_cols = np.array([0, 3])
-    expected_matched_rows = np.array([2, 0])
-    expected_unmatched_cols = np.array([1, 2, 4])
-    sim = tf.constant(similarity)
-    match = matcher.match(sim)
-    matched_cols = match.matched_column_indices()
-    matched_rows = match.matched_row_indices()
-    unmatched_cols = match.unmatched_column_indices()
-    with self.test_session() as sess:
-      res_matched_cols = sess.run(matched_cols)
-      res_matched_rows = sess.run(matched_rows)
-      res_unmatched_cols = sess.run(unmatched_cols)
-    self.assertAllEqual(res_matched_rows, expected_matched_rows)
-    self.assertAllEqual(res_matched_cols, expected_matched_cols)
-    self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)
+
+    def graph_fn(similarity):
+      matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
+                                             unmatched_threshold=2.)
+      match = matcher.match(similarity)
+      matched_cols = match.matched_column_indicator()
+      unmatched_cols = match.unmatched_column_indicator()
+      match_results = match.match_results
+      return (matched_cols, unmatched_cols, match_results)
+
+    similarity = np.array([[1, 1, 1, 3, 1],
+                           [-1, 0, -2, -2, -1],
+                           [3, 0, -1, 2, 0]], dtype=np.float32)
+    expected_matched_cols = np.array([0, 3])
+    expected_matched_rows = np.array([2, 0])
+    expected_unmatched_cols = np.array([1, 2, 4])
+    (res_matched_cols, res_unmatched_cols,
+     match_results) = self.execute(graph_fn, [similarity])
+    self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
+    self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
+    self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
+                        expected_unmatched_cols)
 
   def test_return_correct_matches_unmatched_row_while_using_force_match(self):
-    similarity = np.array([[1, 1, 1, 3, 1],
-                           [-1, 0, -2, -2, -1],
-                           [3, 0, -1, 2, 0]], dtype=np.int32)
-    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
-                                           unmatched_threshold=2,
-                                           force_match_for_each_row=True)
-    expected_matched_cols = np.array([0, 1, 3])
-    expected_matched_rows = np.array([2, 1, 0])
-    expected_unmatched_cols = np.array([2, 4])  # col 2 has too high max val
-    sim = tf.constant(similarity)
-    match = matcher.match(sim)
-    matched_cols = match.matched_column_indices()
-    matched_rows = match.matched_row_indices()
-    unmatched_cols = match.unmatched_column_indices()
-    with self.test_session() as sess:
-      res_matched_cols = sess.run(matched_cols)
-      res_matched_rows = sess.run(matched_rows)
-      res_unmatched_cols = sess.run(unmatched_cols)
-    self.assertAllEqual(res_matched_rows, expected_matched_rows)
-    self.assertAllEqual(res_matched_cols, expected_matched_cols)
-    self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)
+
+    def graph_fn(similarity):
+      matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
+                                             unmatched_threshold=2.,
+                                             force_match_for_each_row=True)
+      match = matcher.match(similarity)
+      matched_cols = match.matched_column_indicator()
+      unmatched_cols = match.unmatched_column_indicator()
+      match_results = match.match_results
+      return (matched_cols, unmatched_cols, match_results)
+
+    similarity = np.array([[1, 1, 1, 3, 1],
+                           [-1, 0, -2, -2, -1],
+                           [3, 0, -1, 2, 0]], dtype=np.float32)
+    expected_matched_cols = np.array([0, 1, 3])
+    expected_matched_rows = np.array([2, 1, 0])
+    expected_unmatched_cols = np.array([2, 4])  # col 2 has too high max val
+    (res_matched_cols, res_unmatched_cols,
+     match_results) = self.execute(graph_fn, [similarity])
+    self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
+    self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
+    self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
+                        expected_unmatched_cols)
 
   def test_valid_arguments_corner_case(self):
     argmax_matcher.ArgMaxMatcher(matched_threshold=1,
......
@@ -211,27 +202,6 @@ class ArgMaxMatcherTest(tf.test.TestCase):
       argmax_matcher.ArgMaxMatcher(matched_threshold=1,
                                    unmatched_threshold=2)
 
-  def test_set_values_using_indicator(self):
-    input_a = np.array([3, 4, 5, 1, 4, 3, 2])
-    expected_b = np.array([3, 0, 0, 1, 0, 3, 2])  # Set a>3 to 0
-    expected_c = np.array(
-        [3., 4., 5., -1., 4., 3., -1.])  # Set a<3 to -1. Float32
-    idxb_ = input_a > 3
-    idxc_ = input_a < 3
-    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
-    a = tf.constant(input_a)
-    idxb = tf.constant(idxb_)
-    idxc = tf.constant(idxc_)
-    b = matcher._set_values_using_indicator(a, idxb, 0)
-    c = matcher._set_values_using_indicator(tf.cast(a, tf.float32), idxc, -1)
-    with self.test_session() as sess:
-      res_b = sess.run(b)
-      res_c = sess.run(c)
-    self.assertAllEqual(res_b, expected_b)
-    self.assertAllEqual(res_c, expected_c)
 
 if __name__ == '__main__':
   tf.test.main()
......
@@ -24,11 +24,22 @@ from object_detection.core import matcher
 
 class GreedyBipartiteMatcher(matcher.Matcher):
   """Wraps a Tensorflow greedy bipartite matcher."""
 
+  def __init__(self, use_matmul_gather=False):
+    """Constructs a Matcher.
+
+    Args:
+      use_matmul_gather: Force constructed match objects to use matrix
+        multiplication based gather instead of standard tf.gather.
+        (Default: False).
+    """
+    super(GreedyBipartiteMatcher, self).__init__(
+        use_matmul_gather=use_matmul_gather)
+
   def _match(self, similarity_matrix, num_valid_rows=-1):
     """Bipartite matches a collection rows and columns. A greedy bi-partite.
 
-    TODO: Add num_valid_columns options to match only that many columns with
-    all the rows.
+    TODO: Add num_valid_columns options to match only that many columns
+    with all the rows.
 
     Args:
       similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
......
@@ -13,12 +13,14 @@ py_library(
     srcs = ["ssd_meta_arch.py"],
     deps = [
         "//tensorflow",
-        "//tensorflow_models/object_detection/core:box_list",
-        "//tensorflow_models/object_detection/core:box_predictor",
-        "//tensorflow_models/object_detection/core:model",
-        "//tensorflow_models/object_detection/core:target_assigner",
-        "//tensorflow_models/object_detection/utils:shape_utils",
-        "//tensorflow_models/object_detection/utils:visualization_utils",
+        "//tensorflow/models/research/object_detection/core:box_list",
+        "//tensorflow/models/research/object_detection/core:box_predictor",
+        "//tensorflow/models/research/object_detection/core:model",
+        "//tensorflow/models/research/object_detection/core:target_assigner",
+        "//tensorflow/models/research/object_detection/utils:ops",
+        "//tensorflow/models/research/object_detection/utils:shape_utils",
+        "//tensorflow/models/research/object_detection/utils:test_case",
+        "//tensorflow/models/research/object_detection/utils:visualization_utils",
     ],
 )
......
@@ -28,13 +30,12 @@ py_test(
     deps = [
         ":ssd_meta_arch",
         "//tensorflow",
-        "//tensorflow/python:training",
-        "//tensorflow_models/object_detection/core:anchor_generator",
-        "//tensorflow_models/object_detection/core:box_list",
-        "//tensorflow_models/object_detection/core:losses",
-        "//tensorflow_models/object_detection/core:post_processing",
-        "//tensorflow_models/object_detection/core:region_similarity_calculator",
-        "//tensorflow_models/object_detection/utils:test_utils",
+        "//tensorflow/models/research/object_detection/core:anchor_generator",
+        "//tensorflow/models/research/object_detection/core:box_list",
+        "//tensorflow/models/research/object_detection/core:losses",
+        "//tensorflow/models/research/object_detection/core:post_processing",
+        "//tensorflow/models/research/object_detection/core:region_similarity_calculator",
+        "//tensorflow/models/research/object_detection/utils:test_utils",
     ],
 )
......
@@ -45,18 +46,18 @@ py_library(
     ],
     deps = [
         "//tensorflow",
-        "//tensorflow_models/object_detection/anchor_generators:grid_anchor_generator",
-        "//tensorflow_models/object_detection/core:balanced_positive_negative_sampler",
-        "//tensorflow_models/object_detection/core:box_list",
-        "//tensorflow_models/object_detection/core:box_list_ops",
-        "//tensorflow_models/object_detection/core:box_predictor",
-        "//tensorflow_models/object_detection/core:losses",
-        "//tensorflow_models/object_detection/core:model",
-        "//tensorflow_models/object_detection/core:post_processing",
-        "//tensorflow_models/object_detection/core:standard_fields",
-        "//tensorflow_models/object_detection/core:target_assigner",
-        "//tensorflow_models/object_detection/utils:ops",
-        "//tensorflow_models/object_detection/utils:shape_utils",
+        "//tensorflow/models/research/object_detection/anchor_generators:grid_anchor_generator",
+        "//tensorflow/models/research/object_detection/core:balanced_positive_negative_sampler",
+        "//tensorflow/models/research/object_detection/core:box_list",
+        "//tensorflow/models/research/object_detection/core:box_list_ops",
+        "//tensorflow/models/research/object_detection/core:box_predictor",
+        "//tensorflow/models/research/object_detection/core:losses",
+        "//tensorflow/models/research/object_detection/core:model",
+        "//tensorflow/models/research/object_detection/core:post_processing",
+        "//tensorflow/models/research/object_detection/core:standard_fields",
+        "//tensorflow/models/research/object_detection/core:target_assigner",
+        "//tensorflow/models/research/object_detection/utils:ops",
+        "//tensorflow/models/research/object_detection/utils:shape_utils",
     ],
 )
......
@@ -68,14 +69,14 @@ py_library(
     deps = [
         ":faster_rcnn_meta_arch",
         "//tensorflow",
-        "//tensorflow_models/object_detection/anchor_generators:grid_anchor_generator",
-        "//tensorflow_models/object_detection/builders:box_predictor_builder",
-        "//tensorflow_models/object_detection/builders:hyperparams_builder",
-        "//tensorflow_models/object_detection/builders:post_processing_builder",
-        "//tensorflow_models/object_detection/core:losses",
-        "//tensorflow_models/object_detection/protos:box_predictor_py_pb2",
-        "//tensorflow_models/object_detection/protos:hyperparams_py_pb2",
-        "//tensorflow_models/object_detection/protos:post_processing_py_pb2",
+        "//tensorflow/models/research/object_detection/anchor_generators:grid_anchor_generator",
+        "//tensorflow/models/research/object_detection/builders:box_predictor_builder",
+        "//tensorflow/models/research/object_detection/builders:hyperparams_builder",
+        "//tensorflow/models/research/object_detection/builders:post_processing_builder",
+        "//tensorflow/models/research/object_detection/core:losses",
+        "//tensorflow/models/research/object_detection/protos:box_predictor_py_pb2",
+        "//tensorflow/models/research/object_detection/protos:hyperparams_py_pb2",
+        "//tensorflow/models/research/object_detection/protos:post_processing_py_pb2",
     ],
 )
......
@@ -93,8 +94,8 @@ py_library(
     deps = [
         ":faster_rcnn_meta_arch",
         "//tensorflow",
-        "//tensorflow_models/object_detection/core:box_predictor",
-        "//tensorflow_models/object_detection/utils:ops",
+        "//tensorflow/models/research/object_detection/core:box_predictor",
+        "//tensorflow/models/research/object_detection/utils:ops",
     ],
 )
......
......@@ -21,13 +21,17 @@ See Faster R-CNN: Ren, Shaoqing, et al.
"Faster R-CNN: Towards real-time object detection with region proposal
networks." Advances in neural information processing systems. 2015.
We allow for two modes: first_stage_only=True and first_stage_only=False. In
the former setting, all of the user facing methods (e.g., predict, postprocess,
loss) can be used as if the model consisted only of the RPN, returning class
agnostic proposals (these can be thought of as approximate detections with no
associated class information). In the latter setting, proposals are computed,
then passed through a second stage "box classifier" to yield (multi-class)
detections.
We allow for three modes: number_of_stages={1, 2, 3}. With 1 stage, all of
the user facing methods (e.g., predict, postprocess, loss) can be used as if
the model consisted only of the RPN, returning class agnostic proposals
(these can be thought of as approximate detections with no associated class
information). With 2 stages, proposals are computed, then passed through a
second stage "box classifier" to yield (multi-class) detections. Finally,
with 3 stages, which is only used during eval, proposals are computed, then
passed through a second stage "box classifier" that computes refined boxes
and classes, and then features are pooled from the refined, non-maximum
suppressed boxes and passed through the box classifier again. If the number
of stages is 3 during training, it is reduced to 2 automatically.
Implementations of Faster R-CNN models must define a new
FasterRCNNFeatureExtractor and override three methods: `preprocess`,
......@@ -62,6 +66,32 @@ Following the API (see model.DetectionModel definition), our outputs after
postprocessing operations are always normalized boxes; however, internally, we
sometimes convert to absolute coordinates --- e.g., for loss computation. In particular,
anchors and proposal_boxes are both represented as absolute coordinates.
Images are resized in the `preprocess` method.
The Faster R-CNN meta architecture has two post-processing methods:
`_postprocess_rpn`, which is applied after the first stage, and
`_postprocess_box_classifier`, which is applied after the second stage. There
are three different ways post-processing can happen, depending on the
number_of_stages configured in the meta architecture:
1. When number_of_stages is 1:
`_postprocess_rpn` is run as part of the `postprocess` method where
true_image_shapes is used to clip proposals, perform non-max suppression and
normalize them.
2. When number_of_stages is 2:
`_postprocess_rpn` is run as part of the `_predict_second_stage` method where
`resized_image_shapes` is used to clip proposals, perform non-max suppression
and normalize them. In this case `postprocess` method skips `_postprocess_rpn`
and only runs `_postprocess_box_classifier` using `true_image_shapes` to clip
detections, perform non-max suppression and normalize them.
3. When number_of_stages is 3:
`_postprocess_rpn` is run as part of the `_predict_second_stage` using
`resized_image_shapes` to clip proposals, perform non-max suppression and
normalize them. Subsequently, `_postprocess_box_classifier` is run as part of
`_predict_third_stage` using `true_image_shapes` to clip detections, perform
non-max suppression and normalize them. In this case, the `postprocess` method
skips both `_postprocess_rpn` and `_postprocess_box_classifier` (all three
paths are sketched just after this docstring).
"""
from abc import abstractmethod
from functools import partial
......@@ -152,7 +182,8 @@ class FasterRCNNFeatureExtractor(object):
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
with tf.variable_scope(scope, values=[proposal_feature_maps]):
with tf.variable_scope(
scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE):
return self._extract_box_classifier_features(proposal_feature_maps, scope)
@abstractmethod
......@@ -194,7 +225,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
num_classes,
image_resizer_fn,
feature_extractor,
first_stage_only,
number_of_stages,
first_stage_anchor_generator,
first_stage_atrous_rate,
first_stage_box_predictor_arg_scope,
......@@ -220,7 +251,8 @@ class FasterRCNNMetaArch(model.DetectionModel):
second_stage_classification_loss,
second_stage_mask_prediction_loss_weight=1.0,
hard_example_miner=None,
parallel_iterations=16):
parallel_iterations=16,
add_summaries=True):
"""FasterRCNNMetaArch Constructor.
Args:
......@@ -232,12 +264,22 @@ class FasterRCNNMetaArch(model.DetectionModel):
assigned classification targets can range from {0,... K}).
image_resizer_fn: A callable for image resizing. This callable
takes a rank-3 image tensor of shape [height, width, channels]
(corresponding to a single image) and returns a rank-3 image tensor,
possibly with new spatial dimensions. See
builders/image_resizer_builder.py.
(corresponding to a single image) and an optional rank-3 instance mask
tensor of shape [num_masks, height, width], and returns a resized rank-3
image tensor plus a resized mask tensor if one was provided in the input. In
addition, this callable must also return a 1-D tensor of the form
[height, width, channels] containing the size of the true image, as the
image resizer can perform zero padding. See protos/image_resizer.proto.
feature_extractor: A FasterRCNNFeatureExtractor object.
first_stage_only: Whether to construct only the Region Proposal Network
(RPN) part of the model.
number_of_stages: An integer taking values in {1, 2, 3}. If
1, the function will construct only the Region Proposal Network (RPN)
part of the model. If 2, the function will perform box refinement and
other auxiliary predictions all in the second stage. If 3, it will
extract features from refined boxes and perform the auxiliary
predictions on the non-maximum suppressed refined boxes.
If is_training is true and the value of number_of_stages is 3, it is
reduced to 2, since all the model heads are trained in parallel in the
second stage during training.
first_stage_anchor_generator: An anchor_generator.AnchorGenerator object
(note that currently we only support
grid_anchor_generator.GridAnchorGenerator objects)
......@@ -314,12 +356,17 @@ class FasterRCNNMetaArch(model.DetectionModel):
hard_example_miner: A losses.HardExampleMiner object (can be None).
parallel_iterations: (Optional) The number of iterations allowed to run
in parallel for calls to tf.map_fn.
add_summaries: boolean (default: True) controlling whether summary ops
should be added to the tensorflow graph.
Raises:
ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at
training time.
ValueError: If first_stage_anchor_generator is not of type
grid_anchor_generator.GridAnchorGenerator.
"""
# TODO: add_summaries is currently unused. Respect that directive
# in the future.
super(FasterRCNNMetaArch, self).__init__(num_classes=num_classes)
if is_training and second_stage_batch_size > first_stage_max_proposals:
......@@ -333,7 +380,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
self._is_training = is_training
self._image_resizer_fn = image_resizer_fn
self._feature_extractor = feature_extractor
self._first_stage_only = first_stage_only
self._number_of_stages = number_of_stages
# The first class is reserved as background.
unmatched_cls_target = tf.constant(
......@@ -368,9 +415,9 @@ class FasterRCNNMetaArch(model.DetectionModel):
self._first_stage_max_proposals = first_stage_max_proposals
self._first_stage_localization_loss = (
losses.WeightedSmoothL1LocalizationLoss(anchorwise_output=True))
losses.WeightedSmoothL1LocalizationLoss())
self._first_stage_objectness_loss = (
losses.WeightedSoftmaxClassificationLoss(anchorwise_output=True))
losses.WeightedSoftmaxClassificationLoss())
self._first_stage_loc_loss_weight = first_stage_localization_loss_weight
self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight
......@@ -389,10 +436,10 @@ class FasterRCNNMetaArch(model.DetectionModel):
self._second_stage_score_conversion_fn = second_stage_score_conversion_fn
self._second_stage_localization_loss = (
losses.WeightedSmoothL1LocalizationLoss(anchorwise_output=True))
losses.WeightedSmoothL1LocalizationLoss())
self._second_stage_classification_loss = second_stage_classification_loss
self._second_stage_mask_loss = (
losses.WeightedSigmoidClassificationLoss(anchorwise_output=True))
losses.WeightedSigmoidClassificationLoss())
self._second_stage_loc_loss_weight = second_stage_localization_loss_weight
self._second_stage_cls_loss_weight = second_stage_classification_loss_weight
self._second_stage_mask_loss_weight = (
......@@ -400,6 +447,11 @@ class FasterRCNNMetaArch(model.DetectionModel):
self._hard_example_miner = hard_example_miner
self._parallel_iterations = parallel_iterations
if self._number_of_stages <= 0 or self._number_of_stages > 3:
raise ValueError('Number of stages should be a value in {1, 2, 3}.')
if self._is_training and self._number_of_stages == 3:
self._number_of_stages = 2
@property
def first_stage_feature_extractor_scope(self):
return 'FirstStageFeatureExtractor'
......@@ -432,6 +484,14 @@ class FasterRCNNMetaArch(model.DetectionModel):
return self._second_stage_batch_size
return self._first_stage_max_proposals
@property
def anchors(self):
if not self._anchors:
raise RuntimeError('anchors have not been constructed yet!')
if not isinstance(self._anchors, box_list.BoxList):
raise RuntimeError('anchors should be a BoxList object, but is not.')
return self._anchors
def preprocess(self, inputs):
"""Feature-extractor specific preprocessing.
......@@ -448,24 +508,53 @@ class FasterRCNNMetaArch(model.DetectionModel):
Returns:
preprocessed_inputs: a [batch, height_out, width_out, channels] float
tensor representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Raises:
ValueError: if inputs tensor does not have type tf.float32
"""
if inputs.dtype is not tf.float32:
raise ValueError('`preprocess` expects a tf.float32 tensor')
with tf.name_scope('Preprocessor'):
resized_inputs = tf.map_fn(self._image_resizer_fn,
outputs = shape_utils.static_or_dynamic_map_fn(
self._image_resizer_fn,
elems=inputs,
dtype=tf.float32,
dtype=[tf.float32, tf.int32],
parallel_iterations=self._parallel_iterations)
return self._feature_extractor.preprocess(resized_inputs)
resized_inputs = outputs[0]
true_image_shapes = outputs[1]
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
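A hedged call sketch of the new two-value contract (the `model` handle and input values here are hypothetical):

# Hypothetical usage: `preprocess` now returns both the resized batch and the
# true (pre-padding) shape of every image in it.
images = tf.placeholder(tf.float32, shape=(2, None, None, 3))
preprocessed_inputs, true_image_shapes = model.preprocess(images)
# true_image_shapes has shape [2, 3]; each row is [height, width, channels].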
def predict(self, preprocessed_inputs):
def _compute_clip_window(self, image_shapes):
"""Computes clip window for non max suppression based on image shapes.
This function assumes that the clip window's top-left corner is at (0, 0).
Args:
image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing
shapes of images in the batch. Each row represents [height, width,
channels] of an image.
Returns:
A 2-D float32 tensor of shape [batch_size, 4] containing the clip window
for each image in the form [ymin, xmin, ymax, xmax].
"""
clip_heights = image_shapes[:, 0]
clip_widths = image_shapes[:, 1]
clip_window = tf.to_float(tf.stack([tf.zeros_like(clip_heights),
tf.zeros_like(clip_heights),
clip_heights, clip_widths], axis=1))
return clip_window
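A quick numeric check of `_compute_clip_window` (image sizes hypothetical):

# Hypothetical batch: true image sizes 300x200 and 150x250 inside a
# zero-padded batch.
image_shapes = tf.constant([[300, 200, 3],
                            [150, 250, 3]], dtype=tf.int32)
# _compute_clip_window(image_shapes) evaluates to
#   [[0., 0., 300., 200.],
#    [0., 0., 150., 250.]]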
def predict(self, preprocessed_inputs, true_image_shapes):
"""Predicts unpostprocessed tensors from input tensor.
This function takes an input batch of images and runs it through the
forward pass of the network to yield "raw" un-postprocessed predictions.
If `first_stage_only` is True, this function only returns first stage
If `number_of_stages` is 1, this function only returns first stage
RPN predictions (un-postprocessed). Otherwise it returns both
first stage RPN predictions as well as second stage box classifier
predictions.
......@@ -481,6 +570,10 @@ class FasterRCNNMetaArch(model.DetectionModel):
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
......@@ -504,7 +597,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
`num_anchors` can differ depending on whether the model is created in
training or inference mode.
(and if first_stage_only=False):
(and if number_of_stages > 1):
7) refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, 4] representing predicted
(final) refined box encodings, where
......@@ -526,6 +619,9 @@ class FasterRCNNMetaArch(model.DetectionModel):
11) mask_predictions: (optional) a 4-D tensor with shape
[total_num_padded_proposals, num_classes, mask_height, mask_width]
containing instance mask predictions.
Raises:
ValueError: If `predict` is called before `preprocess`.
"""
(rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist,
image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs)
......@@ -544,7 +640,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
anchors_boxlist = box_list_ops.clip_to_window(
anchors_boxlist, clip_window)
anchors = anchors_boxlist.get()
self._anchors = anchors_boxlist
prediction_dict = {
'rpn_box_predictor_features': rpn_box_predictor_features,
'rpn_features_to_crop': rpn_features_to_crop,
......@@ -552,22 +648,46 @@ class FasterRCNNMetaArch(model.DetectionModel):
'rpn_box_encodings': rpn_box_encodings,
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'anchors': anchors
'anchors': self._anchors.get()
}
if not self._first_stage_only:
if self._number_of_stages >= 2:
prediction_dict.update(self._predict_second_stage(
rpn_box_encodings,
rpn_objectness_predictions_with_background,
rpn_features_to_crop,
anchors, image_shape))
self._anchors.get(), image_shape, true_image_shapes))
if self._number_of_stages == 3:
prediction_dict = self._predict_third_stage(
prediction_dict, true_image_shapes)
return prediction_dict
def _image_batch_shape_2d(self, image_batch_shape_1d):
"""Takes a 1-D image batch shape tensor and converts it to a 2-D tensor.
Example:
If the 1-D image batch shape tensor is [2, 300, 300, 3], the corresponding
2-D image batch shape tensor would be [[300, 300, 3], [300, 300, 3]].
Args:
image_batch_shape_1d: 1-D tensor of the form [batch_size, height,
width, channels].
Returns:
image_batch_shape_2d: 2-D tensor of shape [batch_size, 3] where each row is
of the form [height, width, channels].
"""
return tf.tile(tf.expand_dims(image_batch_shape_1d[1:], 0),
[image_batch_shape_1d[0], 1])
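The tiling above can be checked directly (values taken from the docstring example):

shape_1d = tf.constant([2, 300, 300, 3])
shape_2d = tf.tile(tf.expand_dims(shape_1d[1:], 0), [shape_1d[0], 1])
# shape_2d evaluates to [[300, 300, 3], [300, 300, 3]]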
def _predict_second_stage(self, rpn_box_encodings,
rpn_objectness_predictions_with_background,
rpn_features_to_crop,
anchors,
image_shape):
image_shape,
true_image_shapes):
"""Predicts the output tensors from second stage of Faster R-CNN.
Args:
......@@ -584,6 +704,10 @@ class FasterRCNNMetaArch(model.DetectionModel):
anchors: 2-D float tensor of shape
[num_anchors, self._box_coder.code_size].
image_shape: A 1-D int32 tensor of size [4] containing the image shape.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
......@@ -617,9 +741,10 @@ class FasterRCNNMetaArch(model.DetectionModel):
[total_num_padded_proposals, num_classes, mask_height, mask_width]
containing instance mask predictions.
"""
image_shape_2d = self._image_batch_shape_2d(image_shape)
proposal_boxes_normalized, _, num_proposals = self._postprocess_rpn(
rpn_box_encodings, rpn_objectness_predictions_with_background,
anchors, image_shape)
anchors, image_shape_2d, true_image_shapes)
flattened_proposal_feature_maps = (
self._compute_second_stage_input_feature_maps(
......@@ -630,10 +755,16 @@ class FasterRCNNMetaArch(model.DetectionModel):
flattened_proposal_feature_maps,
scope=self.second_stage_feature_extractor_scope))
predict_auxiliary_outputs = False
if self._number_of_stages == 2:
predict_auxiliary_outputs = True
box_predictions = self._mask_rcnn_box_predictor.predict(
box_classifier_features,
num_predictions_per_location=1,
scope=self.second_stage_box_predictor_scope)
[box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
predict_boxes_and_classes=True,
predict_auxiliary_outputs=predict_auxiliary_outputs)
refined_box_encodings = tf.squeeze(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.squeeze(box_predictions[
......@@ -658,6 +789,100 @@ class FasterRCNNMetaArch(model.DetectionModel):
return prediction_dict
def _predict_third_stage(self, prediction_dict, image_shapes):
"""Predicts non-box, non-class outputs using refined detections.
Args:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, 4] representing predicted
(final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals
2) class_predictions_with_background: a 3-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
3) num_proposals: An int32 tensor of shape [batch_size] representing the
number of proposals generated by the RPN. `num_proposals` allows us
to keep track of which entries are to be treated as zero paddings and
which are not since we always pad the number of proposals to be
`self.max_num_proposals` for each image.
4) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing
shapes of images in the batch.
Returns:
prediction_dict: a dictionary that, in addition to the input predictions,
also holds the following predictions:
1) mask_predictions: (optional) a 4-D tensor with shape
[batch_size, max_detection, mask_height, mask_width] containing
instance mask predictions.
"""
detections_dict = self._postprocess_box_classifier(
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['proposal_boxes'],
prediction_dict['num_proposals'],
image_shapes)
prediction_dict.update(detections_dict)
detection_boxes = detections_dict[
fields.DetectionResultFields.detection_boxes]
detection_classes = detections_dict[
fields.DetectionResultFields.detection_classes]
rpn_features_to_crop = prediction_dict['rpn_features_to_crop']
batch_size = tf.shape(detection_boxes)[0]
max_detection = tf.shape(detection_boxes)[1]
flattened_detected_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, detection_boxes))
detected_box_classifier_features = (
self._feature_extractor.extract_box_classifier_features(
flattened_detected_feature_maps,
scope=self.second_stage_feature_extractor_scope))
box_predictions = self._mask_rcnn_box_predictor.predict(
[detected_box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
predict_boxes_and_classes=False,
predict_auxiliary_outputs=True)
if box_predictor.MASK_PREDICTIONS in box_predictions:
detection_masks = tf.squeeze(box_predictions[
box_predictor.MASK_PREDICTIONS], axis=1)
detection_masks = self._gather_instance_masks(detection_masks,
detection_classes)
mask_height = tf.shape(detection_masks)[1]
mask_width = tf.shape(detection_masks)[2]
prediction_dict[fields.DetectionResultFields.detection_masks] = (
tf.reshape(detection_masks,
[batch_size, max_detection, mask_height, mask_width]))
return prediction_dict
def _gather_instance_masks(self, instance_masks, classes):
"""Gathers the masks that correspond to classes.
Args:
instance_masks: A 4-D float32 tensor with shape
[K, num_classes, mask_height, mask_width].
classes: A 2-D int32 tensor with shape [batch_size, max_detection].
Returns:
masks: a 3-D float32 tensor with shape [K, mask_height, mask_width].
"""
k = tf.shape(instance_masks)[0]
num_mask_classes = tf.shape(instance_masks)[1]
instance_mask_height = tf.shape(instance_masks)[2]
instance_mask_width = tf.shape(instance_masks)[3]
classes = tf.reshape(classes, [-1])
instance_masks = tf.reshape(instance_masks, [
-1, instance_mask_height, instance_mask_width
])
return tf.gather(instance_masks,
tf.range(k) * num_mask_classes + tf.to_int32(classes))
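The flat-index gather above picks, for each detection, the mask of its predicted class; a minimal numpy sketch of the same arithmetic (shapes hypothetical):

import numpy as np

K, num_classes, H, W = 4, 3, 2, 2  # hypothetical sizes
instance_masks = np.arange(K * num_classes * H * W,
                           dtype=np.float32).reshape(K, num_classes, H, W)
classes = np.array([2, 0, 1, 2])
# Flatten [K, num_classes, H, W] -> [K * num_classes, H, W]; detection i's
# class-c mask then sits at flat index i * num_classes + c.
flat = instance_masks.reshape(-1, H, W)
gathered = flat[np.arange(K) * num_classes + classes]  # shape [K, H, W]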
def _extract_rpn_feature_maps(self, preprocessed_inputs):
"""Extracts RPN features.
......@@ -728,8 +953,8 @@ class FasterRCNNMetaArch(model.DetectionModel):
raise RuntimeError('anchor_generator is expected to generate anchors '
'corresponding to a single feature map.')
box_predictions = self._first_stage_box_predictor.predict(
rpn_box_predictor_features,
num_anchors_per_location[0],
[rpn_box_predictor_features],
num_anchors_per_location,
scope=self.first_stage_box_predictor_scope)
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
......@@ -776,7 +1001,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window(
anchors_boxlist, clip_window)
def _batch_gather_kept_indices(predictions_tensor):
return tf.map_fn(
return shape_utils.static_or_dynamic_map_fn(
partial(tf.gather, indices=keep_indices),
elems=predictions_tensor,
dtype=tf.float32,
......@@ -804,7 +1029,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
combined_shape[2:])
return tf.reshape(inputs, flattened_shape)
def postprocess(self, prediction_dict):
def postprocess(self, prediction_dict, true_image_shapes):
"""Convert prediction tensors to final detections.
This function converts raw predictions tensors to final detection results.
......@@ -812,20 +1037,24 @@ class FasterRCNNMetaArch(model.DetectionModel):
scores are to be interpreted as logits, but if a score_converter is used,
then scores are remapped (and may thus have a different interpretation).
If first_stage_only=True, the returned results represent proposals from the
If number_of_stages=1, the returned results represent proposals from the
first stage RPN and are padded to have self.max_num_proposals for each
image; otherwise, the results can be interpreted as multiclass detections
from the full two-stage model and are padded to self._max_detections.
Args:
prediction_dict: a dictionary holding prediction tensors (see the
documentation for the predict method. If first_stage_only=True, we
documentation for the predict method. If number_of_stages=1, we
expect prediction_dict to contain `rpn_box_encodings`,
`rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,
`image_shape`, and `anchors` fields. Otherwise we expect
prediction_dict to additionally contain `refined_box_encodings`,
and `anchors` fields. Otherwise we expect prediction_dict to
additionally contain `refined_box_encodings`,
`class_predictions_with_background`, `num_proposals`,
`proposal_boxes` and, optionally, `mask_predictions` fields.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
detections: a dictionary containing the following fields
......@@ -834,36 +1063,55 @@ class FasterRCNNMetaArch(model.DetectionModel):
detection_classes: [batch, max_detections]
(this entry is only created if rpn_mode=False)
num_detections: [batch]
Raises:
ValueError: If `predict` is called before `preprocess`.
"""
with tf.name_scope('FirstStagePostprocessor'):
image_shape = prediction_dict['image_shape']
if self._first_stage_only:
if self._number_of_stages == 1:
proposal_boxes, proposal_scores, num_proposals = self._postprocess_rpn(
prediction_dict['rpn_box_encodings'],
prediction_dict['rpn_objectness_predictions_with_background'],
prediction_dict['anchors'],
image_shape)
true_image_shapes,
true_image_shapes)
return {
'detection_boxes': proposal_boxes,
'detection_scores': proposal_scores,
'num_detections': tf.to_float(num_proposals)
fields.DetectionResultFields.detection_boxes: proposal_boxes,
fields.DetectionResultFields.detection_scores: proposal_scores,
fields.DetectionResultFields.num_detections:
tf.to_float(num_proposals),
}
with tf.name_scope('SecondStagePostprocessor'):
if self._number_of_stages == 2:
mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS)
detections_dict = self._postprocess_box_classifier(
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['proposal_boxes'],
prediction_dict['num_proposals'],
image_shape,
true_image_shapes,
mask_predictions=mask_predictions)
return detections_dict
if self._number_of_stages == 3:
# Post processing is already performed in 3rd stage. We need to transfer
# postprocessed tensors from `prediction_dict` to `detections_dict`.
detections_dict = {}
for key in prediction_dict:
if key == fields.DetectionResultFields.detection_masks:
detections_dict[key] = tf.sigmoid(prediction_dict[key])
elif 'detection' in key:
detections_dict[key] = prediction_dict[key]
return detections_dict
def _postprocess_rpn(self,
rpn_box_encodings_batch,
rpn_objectness_predictions_with_background_batch,
anchors,
image_shape):
image_shapes,
true_image_shapes):
"""Converts first stage prediction tensors from the RPN to proposals.
This function decodes the raw RPN predictions, runs non-max suppression
......@@ -885,7 +1133,12 @@ class FasterRCNNMetaArch(model.DetectionModel):
anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
for the first stage RPN. Note that `num_anchors` can differ depending
on whether the model is created in training or inference mode.
image_shape: A 1-D tensor representing the input image shape.
image_shapes: A 2-D tensor of shape [batch, 3] containing the shapes of
images in the batch.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
proposal_boxes: A float tensor with shape
......@@ -909,7 +1162,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
proposal_boxes = tf.squeeze(proposal_boxes, axis=2)
rpn_objectness_softmax_without_background = tf.nn.softmax(
rpn_objectness_predictions_with_background_batch)[:, :, 1]
clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]]))
clip_window = self._compute_clip_window(image_shapes)
(proposal_boxes, proposal_scores, _, _, _,
num_proposals) = post_processing.batch_multiclass_non_max_suppression(
tf.expand_dims(proposal_boxes, axis=2),
......@@ -924,19 +1177,22 @@ class FasterRCNNMetaArch(model.DetectionModel):
proposal_boxes = tf.stop_gradient(proposal_boxes)
if not self._hard_example_miner:
(groundtruth_boxlists, groundtruth_classes_with_background_list,
_) = self._format_groundtruth_data(image_shape)
_) = self._format_groundtruth_data(true_image_shapes)
(proposal_boxes, proposal_scores,
num_proposals) = self._unpad_proposals_and_sample_box_classifier_batch(
proposal_boxes, proposal_scores, num_proposals,
groundtruth_boxlists, groundtruth_classes_with_background_list)
# normalize proposal boxes
proposal_boxes_reshaped = tf.reshape(proposal_boxes, [-1, 4])
normalized_proposal_boxes_reshaped = box_list_ops.to_normalized_coordinates(
box_list.BoxList(proposal_boxes_reshaped),
image_shape[1], image_shape[2], check_range=False).get()
proposal_boxes = tf.reshape(normalized_proposal_boxes_reshaped,
[-1, proposal_boxes.shape[1].value, 4])
return proposal_boxes, proposal_scores, num_proposals
def normalize_boxes(args):
proposal_boxes_per_image = args[0]
image_shape = args[1]
normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(
box_list.BoxList(proposal_boxes_per_image), image_shape[0],
image_shape[1], check_range=False).get()
return normalized_boxes_per_image
normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn(
normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32)
return normalized_proposal_boxes, proposal_scores, num_proposals
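Normalization is now done per image because zero-padded batch members can have different true sizes; a hedged standalone restatement of the per-image arithmetic (assuming the usual [ymin, xmin, ymax, xmax] convention of to_normalized_coordinates):

import numpy as np

def normalize_boxes_np(boxes, height, width):
  # boxes: [num_boxes, 4] absolute [ymin, xmin, ymax, xmax]; divide the y
  # coordinates by the image height and the x coordinates by its width.
  scale = np.array([height, width, height, width], dtype=np.float32)
  return boxes / scale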
def _unpad_proposals_and_sample_box_classifier_batch(
self,
......@@ -951,7 +1207,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
proposal_boxes: A float tensor with shape
[batch_size, num_proposals, 4] representing the (potentially zero
padded) proposal boxes for all images in the batch. These boxes are
represented as normalized coordinates.
represented in absolute coordinates.
proposal_scores: A float tensor with shape
[batch_size, num_proposals] representing the (potentially zero
padded) proposal objectness scores for all images in the batch.
......@@ -968,7 +1224,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
proposal_boxes: A float tensor with shape
[batch_size, second_stage_batch_size, 4] representing the (potentially
zero padded) proposal boxes for all images in the batch. These boxes
are represented as normalized coordinates.
are represented in absolute coordinates.
proposal_scores: A float tensor with shape
[batch_size, second_stage_batch_size] representing the (potentially zero
padded) proposal objectness scores for all images in the batch.
......@@ -1022,7 +1278,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
tf.stack(single_image_proposal_score_sample),
tf.stack(single_image_num_proposals_sample))
def _format_groundtruth_data(self, image_shape):
def _format_groundtruth_data(self, true_image_shapes):
"""Helper function for preparing groundtruth data for target assignment.
In order to be consistent with the model.DetectionModel interface,
......@@ -1035,8 +1291,10 @@ class FasterRCNNMetaArch(model.DetectionModel):
image_shape.
Args:
image_shape: A 1-D int32 tensor of shape [4] representing the shape of the
input image batch.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates
......@@ -1050,8 +1308,11 @@ class FasterRCNNMetaArch(model.DetectionModel):
"""
groundtruth_boxlists = [
box_list_ops.to_absolute_coordinates(
box_list.BoxList(boxes), image_shape[1], image_shape[2])
for boxes in self.groundtruth_lists(fields.BoxListFields.boxes)]
box_list.BoxList(boxes), true_image_shapes[i, 0],
true_image_shapes[i, 1])
for i, boxes in enumerate(
self.groundtruth_lists(fields.BoxListFields.boxes))
]
groundtruth_classes_with_background_list = [
tf.to_float(
tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'))
......@@ -1063,12 +1324,16 @@ class FasterRCNNMetaArch(model.DetectionModel):
if groundtruth_masks_list is not None:
resized_masks_list = []
for mask in groundtruth_masks_list:
resized_4d_mask = tf.image.resize_images(
tf.expand_dims(mask, axis=3),
image_shape[1:3],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
resized_masks_list.append(tf.squeeze(resized_4d_mask, axis=3))
_, resized_mask, _ = self._image_resizer_fn(
# Reuse the given `image_resizer_fn` to resize groundtruth masks.
# `mask` tensor for an image is of the shape [num_masks,
# image_height, image_width]. Below we create a dummy image of the
# shape [image_height, image_width, 1] to use with
# `image_resizer_fn`.
image=tf.zeros(tf.stack([tf.shape(mask)[1], tf.shape(mask)[2], 1])),
masks=mask)
resized_masks_list.append(resized_mask)
groundtruth_masks_list = resized_masks_list
return (groundtruth_boxlists, groundtruth_classes_with_background_list,
......@@ -1152,7 +1417,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
class_predictions_with_background,
proposal_boxes,
num_proposals,
image_shape,
image_shapes,
mask_predictions=None):
"""Converts predictions from the second stage box classifier to detections.
......@@ -1169,7 +1434,8 @@ class FasterRCNNMetaArch(model.DetectionModel):
bounding boxes in absolute coordinates.
num_proposals: a 1-D int32 tensor of shape [batch] representing the number
of proposals predicted for each image in the batch.
image_shape: a 1-D int32 tensor representing the input image shape.
image_shapes: a 2-D int32 tensor containing the shapes of the input images
in the batch.
mask_predictions: (optional) a 4-D float tensor with shape
[total_num_padded_proposals, num_classes, mask_height, mask_width]
containing instance mask prediction logits.
......@@ -1202,8 +1468,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
tf.slice(class_predictions_with_background_batch,
[0, 0, 1], [-1, -1, -1]),
[-1, self.max_num_proposals, self.num_classes])
clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]]))
clip_window = self._compute_clip_window(image_shapes)
mask_predictions_batch = None
if mask_predictions is not None:
mask_height = mask_predictions.shape[2].value
......@@ -1220,12 +1485,14 @@ class FasterRCNNMetaArch(model.DetectionModel):
change_coordinate_frame=True,
num_valid_boxes=num_proposals,
masks=mask_predictions_batch)
detections = {'detection_boxes': nmsed_boxes,
'detection_scores': nmsed_scores,
'detection_classes': nmsed_classes,
'num_detections': tf.to_float(num_detections)}
detections = {
fields.DetectionResultFields.detection_boxes: nmsed_boxes,
fields.DetectionResultFields.detection_scores: nmsed_scores,
fields.DetectionResultFields.detection_classes: nmsed_classes,
fields.DetectionResultFields.num_detections: tf.to_float(num_detections)
}
if nmsed_masks is not None:
detections['detection_masks'] = nmsed_masks
detections[fields.DetectionResultFields.detection_masks] = nmsed_masks
return detections
def _batch_decode_boxes(self, box_encodings, anchor_boxes):
......@@ -1257,22 +1524,26 @@ class FasterRCNNMetaArch(model.DetectionModel):
tf.stack([combined_shape[0], combined_shape[1],
num_classes, 4]))
def loss(self, prediction_dict, scope=None):
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Compute scalar loss tensors given prediction tensors.
If first_stage_only=True, only RPN related losses are computed (i.e.,
If number_of_stages=1, only RPN related losses are computed (i.e.,
`rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all
losses are computed.
Args:
prediction_dict: a dictionary holding prediction tensors (see the
documentation for the predict method. If first_stage_only=True, we
documentation for the predict method. If number_of_stages=1, we
expect prediction_dict to contain `rpn_box_encodings`,
`rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,
`image_shape`, and `anchors` fields. Otherwise we expect
prediction_dict to additionally contain `refined_box_encodings`,
`class_predictions_with_background`, `num_proposals`, and
`proposal_boxes` fields.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
scope: Optional scope name.
Returns:
......@@ -1283,15 +1554,15 @@ class FasterRCNNMetaArch(model.DetectionModel):
"""
with tf.name_scope(scope, 'Loss', prediction_dict.values()):
(groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_masks_list
) = self._format_groundtruth_data(prediction_dict['image_shape'])
groundtruth_masks_list) = self._format_groundtruth_data(
true_image_shapes)
loss_dict = self._loss_rpn(
prediction_dict['rpn_box_encodings'],
prediction_dict['rpn_objectness_predictions_with_background'],
prediction_dict['anchors'],
groundtruth_boxlists,
groundtruth_classes_with_background_list)
if not self._first_stage_only:
if self._number_of_stages > 1:
loss_dict.update(
self._loss_box_classifier(
prediction_dict['refined_box_encodings'],
......@@ -1352,7 +1623,7 @@ class FasterRCNNMetaArch(model.DetectionModel):
return self._first_stage_sampler.subsample(
tf.cast(cls_weights, tf.bool),
self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool))
batch_sampled_indices = tf.to_float(tf.map_fn(
batch_sampled_indices = tf.to_float(shape_utils.static_or_dynamic_map_fn(
_minibatch_subsample_fn,
[batch_cls_targets, batch_cls_weights],
dtype=tf.bool,
......@@ -1491,10 +1762,13 @@ class FasterRCNNMetaArch(model.DetectionModel):
second_stage_loc_losses = self._second_stage_localization_loss(
reshaped_refined_box_encodings,
batch_reg_targets, weights=batch_reg_weights) / normalizer
second_stage_cls_losses = self._second_stage_classification_loss(
second_stage_cls_losses = ops.reduce_sum_trailing_dimensions(
self._second_stage_classification_loss(
class_predictions_with_background,
batch_cls_targets_with_background,
weights=batch_cls_weights) / normalizer
weights=batch_cls_weights),
ndims=2) / normalizer
second_stage_loc_loss = tf.reduce_sum(
tf.boolean_mask(second_stage_loc_losses, paddings_indicator))
second_stage_cls_loss = tf.reduce_sum(
......@@ -1522,9 +1796,9 @@ class FasterRCNNMetaArch(model.DetectionModel):
# Create a new target assigner that matches the proposals to groundtruth
# and returns the mask targets.
# TODO: Move `unmatched_cls_target` from constructor to assign
# function. This will enable reuse of a single target assigner for both
# class targets and mask targets.
mask_target_assigner = target_assigner.create_target_assigner(
'FasterRCNN', 'detection',
unmatched_cls_target=tf.zeros(image_shape[1:3], dtype=tf.float32))
......@@ -1566,14 +1840,16 @@ class FasterRCNNMetaArch(model.DetectionModel):
flat_cropped_gt_mask,
[batch_size, -1, mask_height * mask_width])
second_stage_mask_losses = self._second_stage_mask_loss(
second_stage_mask_losses = ops.reduce_sum_trailing_dimensions(
self._second_stage_mask_loss(
reshaped_prediction_masks,
batch_cropped_gt_mask,
weights=batch_mask_target_weights) / (
mask_height * mask_width *
tf.maximum(tf.reduce_sum(batch_mask_target_weights, axis=1,
keep_dims=True),
tf.ones((batch_size, 1))))
weights=batch_mask_target_weights),
ndims=2) / (
mask_height * mask_width * tf.maximum(
tf.reduce_sum(
batch_mask_target_weights, axis=1, keep_dims=True
), tf.ones((batch_size, 1))))
second_stage_mask_loss = tf.reduce_sum(
tf.boolean_mask(second_stage_mask_losses, paddings_indicator))
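The normalizer above divides each proposal's summed per-pixel loss by the mask area and by the number of weighted proposals; a hedged numeric check (numbers hypothetical):

# Hypothetical numbers: 14x14 masks and 3 proposals carrying weight in an
# image give a normalizer of 14 * 14 * 3 = 588; the tf.maximum(..., 1)
# guard keeps the denominator positive when no proposal carries weight.
mask_height, mask_width, weights_sum = 14, 14, 3.0
normalizer = mask_height * mask_width * max(weights_sum, 1.0)  # 588.0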
......@@ -1647,7 +1923,9 @@ class FasterRCNNMetaArch(model.DetectionModel):
cls_losses=tf.expand_dims(single_image_cls_loss, 0),
decoded_boxlist_list=[proposal_boxlist])
def restore_map(self, from_detection_checkpoint=True):
def restore_map(self,
from_detection_checkpoint=True,
load_all_detection_checkpoint_vars=False):
"""Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
......@@ -1655,7 +1933,11 @@ class FasterRCNNMetaArch(model.DetectionModel):
Args:
from_detection_checkpoint: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
classification checkpoint for initialization prior to training. Default
True.
load_all_detection_checkpoint_vars: whether to load all variables (when
`from_detection_checkpoint` is True). If False, only variables within
the feature extractor scopes are included. Default False.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
......@@ -1670,8 +1952,12 @@ class FasterRCNNMetaArch(model.DetectionModel):
variables_to_restore.append(slim.get_or_create_global_step())
# Only load feature extractor variables to be consistent with loading from
# a classification checkpoint.
include_patterns = None
if not load_all_detection_checkpoint_vars:
include_patterns = [
self.first_stage_feature_extractor_scope,
self.second_stage_feature_extractor_scope
]
feature_extractor_variables = tf.contrib.framework.filter_variables(
variables_to_restore,
include_patterns=[self.first_stage_feature_extractor_scope,
self.second_stage_feature_extractor_scope])
variables_to_restore, include_patterns=include_patterns)
return {var.op.name: var for var in feature_extractor_variables}
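A hedged usage sketch of `restore_map` with the new flag (the `model` handle and checkpoint path are hypothetical):

# Hypothetical usage: load every detection-checkpoint variable instead of
# only the two feature extractor scopes.
var_map = model.restore_map(from_detection_checkpoint=True,
                            load_all_detection_checkpoint_vars=True)
saver = tf.train.Saver(var_map)
with tf.Session() as sess:
  saver.restore(sess, '/path/to/detection_checkpoint')  # path hypothetical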
......@@ -26,7 +26,7 @@ class FasterRCNNMetaArchTest(
def test_postprocess_second_stage_only_inference_mode_with_masks(self):
model = self._build_model(
is_training=False, first_stage_only=False, second_stage_batch_size=6)
is_training=False, number_of_stages=2, second_stage_batch_size=6)
batch_size = 2
total_num_padded_proposals = batch_size * model.max_num_proposals
......@@ -61,6 +61,7 @@ class FasterRCNNMetaArchTest(
[[1, 1], [1, 1]],
[[0, 0], [0, 0]]]])
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
detections = model.postprocess({
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background': class_predictions_with_background,
......@@ -68,7 +69,7 @@ class FasterRCNNMetaArchTest(
'proposal_boxes': proposal_boxes,
'image_shape': image_shape,
'mask_predictions': mask_predictions
})
}, true_image_shapes)
with self.test_session() as sess:
detections_out = sess.run(detections)
self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])
......@@ -79,6 +80,227 @@ class FasterRCNNMetaArchTest(
self.assertAllClose(detections_out['num_detections'], [5, 4])
self.assertAllClose(detections_out['detection_masks'],
exp_detection_masks)
self.assertTrue(np.amax(detections_out['detection_masks']) <= 1.0)
self.assertTrue(np.amin(detections_out['detection_masks']) >= 0.0)
def test_predict_correct_shapes_in_inference_mode_three_stages_with_masks(
self):
batch_size = 2
image_size = 10
max_num_proposals = 8
initial_crop_size = 3
maxpool_stride = 1
input_shapes = [(batch_size, image_size, image_size, 3),
(None, image_size, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
expected_num_anchors = image_size * image_size * 3 * 3
expected_shapes = {
'rpn_box_predictor_features':
(2, image_size, image_size, 512),
'rpn_features_to_crop': (2, image_size, image_size, 3),
'image_shape': (4,),
'rpn_box_encodings': (2, expected_num_anchors, 4),
'rpn_objectness_predictions_with_background':
(2, expected_num_anchors, 2),
'anchors': (expected_num_anchors, 4),
'refined_box_encodings': (2 * max_num_proposals, 2, 4),
'class_predictions_with_background': (2 * max_num_proposals, 2 + 1),
'num_proposals': (2,),
'proposal_boxes': (2, max_num_proposals, 4),
'proposal_boxes_normalized': (2, max_num_proposals, 4),
'box_classifier_features':
self._get_box_classifier_features_shape(image_size,
batch_size,
max_num_proposals,
initial_crop_size,
maxpool_stride,
3)
}
for input_shape in input_shapes:
test_graph = tf.Graph()
with test_graph.as_default():
model = self._build_model(
is_training=False,
number_of_stages=3,
second_stage_batch_size=2,
predict_masks=True)
preprocessed_inputs = tf.placeholder(tf.float32, shape=input_shape)
_, true_image_shapes = model.preprocess(preprocessed_inputs)
result_tensor_dict = model.predict(preprocessed_inputs,
true_image_shapes)
init_op = tf.global_variables_initializer()
with self.test_session(graph=test_graph) as sess:
sess.run(init_op)
tensor_dict_out = sess.run(result_tensor_dict, feed_dict={
preprocessed_inputs:
np.zeros((batch_size, image_size, image_size, 3))})
self.assertEqual(
set(tensor_dict_out.keys()),
set(expected_shapes.keys()).union(
set([
'detection_boxes', 'detection_scores', 'detection_classes',
'detection_masks', 'num_detections'
])))
for key in expected_shapes:
self.assertAllEqual(tensor_dict_out[key].shape, expected_shapes[key])
self.assertAllEqual(tensor_dict_out['detection_boxes'].shape, [2, 5, 4])
self.assertAllEqual(tensor_dict_out['detection_masks'].shape,
[2, 5, 14, 14])
self.assertAllEqual(tensor_dict_out['detection_classes'].shape, [2, 5])
self.assertAllEqual(tensor_dict_out['detection_scores'].shape, [2, 5])
self.assertAllEqual(tensor_dict_out['num_detections'].shape, [2])
def test_predict_gives_correct_shapes_in_train_mode_both_stages_with_masks(
self):
test_graph = tf.Graph()
with test_graph.as_default():
model = self._build_model(
is_training=True,
number_of_stages=2,
second_stage_batch_size=7,
predict_masks=True)
batch_size = 2
image_size = 10
max_num_proposals = 7
initial_crop_size = 3
maxpool_stride = 1
image_shape = (batch_size, image_size, image_size, 3)
preprocessed_inputs = tf.zeros(image_shape, dtype=tf.float32)
groundtruth_boxes_list = [
tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32),
tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32)
]
groundtruth_classes_list = [
tf.constant([[1, 0], [0, 1]], dtype=tf.float32),
tf.constant([[1, 0], [1, 0]], dtype=tf.float32)
]
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes)
expected_shapes = {
'rpn_box_predictor_features': (2, image_size, image_size, 512),
'rpn_features_to_crop': (2, image_size, image_size, 3),
'image_shape': (4,),
'refined_box_encodings': (2 * max_num_proposals, 2, 4),
'class_predictions_with_background': (2 * max_num_proposals, 2 + 1),
'num_proposals': (2,),
'proposal_boxes': (2, max_num_proposals, 4),
'proposal_boxes_normalized': (2, max_num_proposals, 4),
'box_classifier_features':
self._get_box_classifier_features_shape(
image_size, batch_size, max_num_proposals, initial_crop_size,
maxpool_stride, 3),
'mask_predictions': (2 * max_num_proposals, 2, 14, 14)
}
init_op = tf.global_variables_initializer()
with self.test_session(graph=test_graph) as sess:
sess.run(init_op)
tensor_dict_out = sess.run(result_tensor_dict)
self.assertEqual(
set(tensor_dict_out.keys()),
set(expected_shapes.keys()).union(
set([
'rpn_box_encodings',
'rpn_objectness_predictions_with_background',
'anchors',
])))
for key in expected_shapes:
self.assertAllEqual(tensor_dict_out[key].shape, expected_shapes[key])
anchors_shape_out = tensor_dict_out['anchors'].shape
self.assertEqual(2, len(anchors_shape_out))
self.assertEqual(4, anchors_shape_out[1])
num_anchors_out = anchors_shape_out[0]
self.assertAllEqual(tensor_dict_out['rpn_box_encodings'].shape,
(2, num_anchors_out, 4))
self.assertAllEqual(
tensor_dict_out['rpn_objectness_predictions_with_background'].shape,
(2, num_anchors_out, 2))
def test_postprocess_third_stage_only_inference_mode(self):
num_proposals_shapes = [(2,), (None,)]
refined_box_encodings_shapes = [(16, 2, 4), (None, 2, 4)]
class_predictions_with_background_shapes = [(16, 3), (None, 3)]
proposal_boxes_shapes = [(2, 8, 4), (None, 8, 4)]
batch_size = 2
image_shape = np.array((2, 36, 48, 3), dtype=np.int32)
for (num_proposals_shape, refined_box_encoding_shape,
class_predictions_with_background_shape,
proposal_boxes_shape) in zip(num_proposals_shapes,
refined_box_encodings_shapes,
class_predictions_with_background_shapes,
proposal_boxes_shapes):
tf_graph = tf.Graph()
with tf_graph.as_default():
model = self._build_model(
is_training=False, number_of_stages=3,
second_stage_batch_size=6, predict_masks=True)
total_num_padded_proposals = batch_size * model.max_num_proposals
proposal_boxes = np.array(
[[[1, 1, 2, 3],
[0, 0, 1, 1],
[.5, .5, .6, .6],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],
[[2, 3, 6, 8],
[1, 2, 5, 3],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]])
num_proposals = np.array([3, 2], dtype=np.int32)
refined_box_encodings = np.zeros(
[total_num_padded_proposals, model.num_classes, 4])
class_predictions_with_background = np.ones(
[total_num_padded_proposals, model.num_classes+1])
num_proposals_placeholder = tf.placeholder(tf.int32,
shape=num_proposals_shape)
refined_box_encodings_placeholder = tf.placeholder(
tf.float32, shape=refined_box_encoding_shape)
class_predictions_with_background_placeholder = tf.placeholder(
tf.float32, shape=class_predictions_with_background_shape)
proposal_boxes_placeholder = tf.placeholder(
tf.float32, shape=proposal_boxes_shape)
image_shape_placeholder = tf.placeholder(tf.int32, shape=(4,))
_, true_image_shapes = model.preprocess(
tf.zeros(image_shape_placeholder))
detections = model.postprocess({
'refined_box_encodings': refined_box_encodings_placeholder,
'class_predictions_with_background':
class_predictions_with_background_placeholder,
'num_proposals': num_proposals_placeholder,
'proposal_boxes': proposal_boxes_placeholder,
'image_shape': image_shape_placeholder,
'detection_boxes': tf.zeros([2, 5, 4]),
'detection_masks': tf.zeros([2, 5, 14, 14]),
'detection_scores': tf.zeros([2, 5]),
'detection_classes': tf.zeros([2, 5]),
'num_detections': tf.zeros([2]),
}, true_image_shapes)
with self.test_session(graph=tf_graph) as sess:
detections_out = sess.run(
detections,
feed_dict={
refined_box_encodings_placeholder: refined_box_encodings,
class_predictions_with_background_placeholder:
class_predictions_with_background,
num_proposals_placeholder: num_proposals,
proposal_boxes_placeholder: proposal_boxes,
image_shape_placeholder: image_shape
})
self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])
self.assertAllEqual(detections_out['detection_masks'].shape,
[2, 5, 14, 14])
self.assertAllClose(detections_out['detection_scores'].shape, [2, 5])
self.assertAllClose(detections_out['detection_classes'].shape, [2, 5])
self.assertAllClose(detections_out['num_detections'].shape, [2])
self.assertTrue(np.amax(detections_out['detection_masks']) <= 1.0)
self.assertTrue(np.amin(detections_out['detection_masks']) >= 0.0)
def _get_box_classifier_features_shape(self,
image_size,
......
......@@ -89,10 +89,39 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
"""
return box_predictor_text_proto
def _get_second_stage_box_predictor(self, num_classes, is_training):
def _add_mask_to_second_stage_box_predictor_text_proto(self):
box_predictor_text_proto = """
mask_rcnn_box_predictor {
predict_instance_masks: true
mask_height: 14
mask_width: 14
conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
}
"""
return box_predictor_text_proto
def _get_second_stage_box_predictor(self, num_classes, is_training,
predict_masks):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(self._get_second_stage_box_predictor_text_proto(),
box_predictor_proto)
if predict_masks:
text_format.Merge(
self._add_mask_to_second_stage_box_predictor_text_proto(),
box_predictor_proto)
return box_predictor_builder.build(
hyperparams_builder.build,
box_predictor_proto,
......@@ -109,15 +138,36 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
def _build_model(self,
is_training,
first_stage_only,
number_of_stages,
second_stage_batch_size,
first_stage_max_proposals=8,
num_classes=2,
hard_mining=False,
softmax_second_stage_classification_loss=True):
def image_resizer_fn(image):
return tf.identity(image)
softmax_second_stage_classification_loss=True,
predict_masks=False,
pad_to_max_dimension=None):
def image_resizer_fn(image, masks=None):
"""Fake image resizer function."""
resized_inputs = []
resized_image = tf.identity(image)
if pad_to_max_dimension is not None:
resized_image = tf.image.pad_to_bounding_box(image, 0, 0,
pad_to_max_dimension,
pad_to_max_dimension)
resized_inputs.append(resized_image)
if masks is not None:
resized_masks = tf.identity(masks)
if pad_to_max_dimension is not None:
resized_masks = tf.image.pad_to_bounding_box(tf.transpose(masks,
[1, 2, 0]),
0, 0,
pad_to_max_dimension,
pad_to_max_dimension)
resized_masks = tf.transpose(resized_masks, [2, 0, 1])
resized_inputs.append(resized_masks)
resized_inputs.append(tf.shape(image))
return resized_inputs
# anchors in this test are designed so that a subset of anchors are inside
# the image and a subset of anchors are outside.
......@@ -181,10 +231,10 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
second_stage_classification_loss_weight = 1.0
if softmax_second_stage_classification_loss:
second_stage_classification_loss = (
losses.WeightedSoftmaxClassificationLoss(anchorwise_output=True))
losses.WeightedSoftmaxClassificationLoss())
else:
second_stage_classification_loss = (
losses.WeightedSigmoidClassificationLoss(anchorwise_output=True))
losses.WeightedSigmoidClassificationLoss())
hard_example_miner = None
if hard_mining:
......@@ -201,7 +251,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
'num_classes': num_classes,
'image_resizer_fn': image_resizer_fn,
'feature_extractor': fake_feature_extractor,
'first_stage_only': first_stage_only,
'number_of_stages': number_of_stages,
'first_stage_anchor_generator': first_stage_anchor_generator,
'first_stage_atrous_rate': first_stage_atrous_rate,
'first_stage_box_predictor_arg_scope':
......@@ -232,23 +282,27 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
second_stage_classification_loss,
'hard_example_miner': hard_example_miner}
return self._get_model(self._get_second_stage_box_predictor(
num_classes=num_classes, is_training=is_training), **common_kwargs)
return self._get_model(
self._get_second_stage_box_predictor(
num_classes=num_classes,
is_training=is_training,
predict_masks=predict_masks), **common_kwargs)
def test_predict_gives_correct_shapes_in_inference_mode_first_stage_only(
self):
test_graph = tf.Graph()
with test_graph.as_default():
model = self._build_model(
is_training=False, first_stage_only=True, second_stage_batch_size=2)
is_training=False, number_of_stages=1, second_stage_batch_size=2)
batch_size = 2
height = 10
width = 12
input_image_shape = (batch_size, height, width, 3)
preprocessed_inputs = tf.placeholder(dtype=tf.float32,
shape=(batch_size, None, None, 3))
prediction_dict = model.predict(preprocessed_inputs)
_, true_image_shapes = model.preprocess(tf.zeros(input_image_shape))
preprocessed_inputs = tf.placeholder(
dtype=tf.float32, shape=(batch_size, None, None, 3))
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
# In inference mode, anchors are clipped to the image window, but not
# pruned. Since MockFasterRCNN.extract_proposal_features returns a
@@ -269,7 +323,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
with self.test_session(graph=test_graph) as sess:
sess.run(init_op)
prediction_out = sess.run(prediction_dict,
feed_dict={
@@ -295,14 +349,15 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
test_graph = tf.Graph()
with test_graph.as_default():
model = self._build_model(
is_training=True, first_stage_only=True, second_stage_batch_size=2)
is_training=True, number_of_stages=1, second_stage_batch_size=2)
batch_size = 2
height = 10
width = 12
input_image_shape = (batch_size, height, width, 3)
preprocessed_inputs = tf.placeholder(dtype=tf.float32,
shape=(batch_size, None, None, 3))
prediction_dict = model.predict(preprocessed_inputs)
_, true_image_shapes = model.preprocess(tf.zeros(input_image_shape))
preprocessed_inputs = tf.placeholder(
dtype=tf.float32, shape=(batch_size, None, None, 3))
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
expected_output_keys = set([
'rpn_box_predictor_features', 'rpn_features_to_crop', 'image_shape',
@@ -314,7 +369,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
num_anchors_strict_upper_bound = height * width * 3 * 3
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
with self.test_session(graph=test_graph) as sess:
sess.run(init_op)
prediction_out = sess.run(prediction_dict,
feed_dict={
@@ -344,8 +399,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
prediction_out['rpn_objectness_predictions_with_background'].shape,
(batch_size, num_anchors_out, 2))
def test_predict_correct_shapes_in_inference_mode_both_stages(
self):
def test_predict_correct_shapes_in_inference_mode_two_stages(self):
batch_size = 2
image_size = 10
max_num_proposals = 8
@@ -384,10 +438,14 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
test_graph = tf.Graph()
with test_graph.as_default():
model = self._build_model(
is_training=False, first_stage_only=False,
second_stage_batch_size=2)
is_training=False,
number_of_stages=2,
second_stage_batch_size=2,
predict_masks=False)
preprocessed_inputs = tf.placeholder(tf.float32, shape=input_shape)
result_tensor_dict = model.predict(preprocessed_inputs)
_, true_image_shapes = model.preprocess(preprocessed_inputs)
result_tensor_dict = model.predict(
preprocessed_inputs, true_image_shapes)
init_op = tf.global_variables_initializer()
with self.test_session(graph=test_graph) as sess:
sess.run(init_op)
@@ -403,7 +461,10 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
test_graph = tf.Graph()
with test_graph.as_default():
model = self._build_model(
is_training=True, first_stage_only=False, second_stage_batch_size=7)
is_training=True,
number_of_stages=2,
second_stage_batch_size=7,
predict_masks=False)
batch_size = 2
image_size = 10
@@ -420,10 +481,11 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
tf.constant([[1, 0], [0, 1]], dtype=tf.float32),
tf.constant([[1, 0], [1, 0]], dtype=tf.float32)]
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
result_tensor_dict = model.predict(preprocessed_inputs)
result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes)
expected_shapes = {
'rpn_box_predictor_features':
(2, image_size, image_size, 512),
@@ -444,7 +506,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
with self.test_session(graph=test_graph) as sess:
sess.run(init_op)
tensor_dict_out = sess.run(result_tensor_dict)
self.assertEqual(set(tensor_dict_out.keys()),
@@ -465,9 +527,11 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
tensor_dict_out['rpn_objectness_predictions_with_background'].shape,
(2, num_anchors_out, 2))
def test_postprocess_first_stage_only_inference_mode(self):
def _test_postprocess_first_stage_only_inference_mode(
self, pad_to_max_dimension=None):
model = self._build_model(
is_training=False, first_stage_only=True, second_stage_batch_size=6)
is_training=False, number_of_stages=1, second_stage_batch_size=6,
pad_to_max_dimension=pad_to_max_dimension)
batch_size = 2
anchors = tf.constant(
[[0, 0, 16, 16],
@@ -490,13 +554,13 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
[10, -11]]], dtype=tf.float32)
rpn_features_to_crop = tf.ones((batch_size, 8, 8, 10), dtype=tf.float32)
image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32)
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
proposals = model.postprocess({
'rpn_box_encodings': rpn_box_encodings,
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'rpn_features_to_crop': rpn_features_to_crop,
'anchors': anchors,
'image_shape': image_shape})
'anchors': anchors}, true_image_shapes)
expected_proposal_boxes = [
[[0, 0, .5, .5], [.5, .5, 1, 1], [0, .5, .5, 1], [.5, 0, 1.0, .5]]
+ 4 * [4 * [0]],
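        # (`4 * [4 * [0]]` appends four all-zero boxes, padding this image's
        # proposals up to first_stage_max_proposals=8.)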
@@ -518,9 +582,18 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
self.assertAllEqual(proposals_out['num_detections'],
expected_num_proposals)
def test_postprocess_first_stage_only_train_mode(self):
def test_postprocess_first_stage_only_inference_mode(self):
self._test_postprocess_first_stage_only_inference_mode()
def test_postprocess_first_stage_only_inference_mode_padded_image(self):
self._test_postprocess_first_stage_only_inference_mode(
pad_to_max_dimension=56)
def _test_postprocess_first_stage_only_train_mode(self,
pad_to_max_dimension=None):
model = self._build_model(
is_training=True, first_stage_only=True, second_stage_batch_size=2)
is_training=True, number_of_stages=1, second_stage_batch_size=2,
pad_to_max_dimension=pad_to_max_dimension)
batch_size = 2
anchors = tf.constant(
[[0, 0, 16, 16],
@@ -549,6 +622,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]], dtype=tf.float32),
tf.constant([[1, 0], [1, 0]], dtype=tf.float32)]
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
proposals = model.postprocess({
@@ -556,8 +630,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'rpn_features_to_crop': rpn_features_to_crop,
'anchors': anchors,
'image_shape': image_shape})
'anchors': anchors}, true_image_shapes)
expected_proposal_boxes = [
[[0, 0, .5, .5], [.5, .5, 1, 1]], [[0, .5, .5, 1], [.5, 0, 1, .5]]]
expected_proposal_scores = [[1, 1],
@@ -577,8 +650,15 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
self.assertAllEqual(proposals_out['num_detections'],
expected_num_proposals)
def test_postprocess_second_stage_only_inference_mode(self):
num_proposals_shapes = [(2), (None)]
def test_postprocess_first_stage_only_train_mode(self):
self._test_postprocess_first_stage_only_train_mode()
def test_postprocess_first_stage_only_train_mode_padded_image(self):
self._test_postprocess_first_stage_only_train_mode(pad_to_max_dimension=56)
def _test_postprocess_second_stage_only_inference_mode(
self, pad_to_max_dimension=None):
num_proposals_shapes = [(2), (None,)]
refined_box_encodings_shapes = [(16, 2, 4), (None, 2, 4)]
class_predictions_with_background_shapes = [(16, 3), (None, 3)]
proposal_boxes_shapes = [(2, 8, 4), (None, 8, 4)]
@@ -593,8 +673,10 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
tf_graph = tf.Graph()
with tf_graph.as_default():
model = self._build_model(
is_training=False, first_stage_only=False,
second_stage_batch_size=6)
is_training=False, number_of_stages=2,
second_stage_batch_size=6,
pad_to_max_dimension=pad_to_max_dimension)
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
total_num_padded_proposals = batch_size * model.max_num_proposals
proposal_boxes = np.array(
[[[1, 1, 2, 3],
@@ -626,8 +708,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
class_predictions_with_background_placeholder,
'num_proposals': num_proposals_placeholder,
'proposal_boxes': proposal_boxes_placeholder,
'image_shape': image_shape_placeholder,
})
}, true_image_shapes)
with self.test_session(graph=tf_graph) as sess:
detections_out = sess.run(
detections,
@@ -646,21 +727,28 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
[[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]])
self.assertAllClose(detections_out['num_detections'], [5, 4])
def test_postprocess_second_stage_only_inference_mode(self):
self._test_postprocess_second_stage_only_inference_mode()
def test_postprocess_second_stage_only_inference_mode_padded_image(self):
self._test_postprocess_second_stage_only_inference_mode(
pad_to_max_dimension=56)
def test_preprocess_preserves_input_shapes(self):
image_shapes = [(3, None, None, 3),
(None, 10, 10, 3),
(None, None, None, 3)]
for image_shape in image_shapes:
model = self._build_model(
is_training=False, first_stage_only=False, second_stage_batch_size=6)
is_training=False, number_of_stages=2, second_stage_batch_size=6)
image_placeholder = tf.placeholder(tf.float32, shape=image_shape)
preprocessed_inputs = model.preprocess(image_placeholder)
preprocessed_inputs, _ = model.preprocess(image_placeholder)
self.assertAllEqual(preprocessed_inputs.shape.as_list(), image_shape)
# TODO: Split test into two - with and without masks.
def test_loss_first_stage_only_mode(self):
model = self._build_model(
is_training=True, first_stage_only=True, second_stage_batch_size=6)
is_training=True, number_of_stages=1, second_stage_batch_size=6)
batch_size = 2
anchors = tf.constant(
[[0, 0, 16, 16],
@@ -698,9 +786,10 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
'image_shape': image_shape,
'anchors': anchors
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
loss_dict = model.loss(prediction_dict)
loss_dict = model.loss(prediction_dict, true_image_shapes)
with self.test_session() as sess:
loss_dict_out = sess.run(loss_dict)
self.assertAllClose(loss_dict_out['first_stage_localization_loss'], 0)
@@ -711,7 +800,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
# TODO: Split test into two - with and without masks.
def test_loss_full(self):
model = self._build_model(
is_training=True, first_stage_only=False, second_stage_batch_size=6)
is_training=True, number_of_stages=2, second_stage_batch_size=6)
batch_size = 2
anchors = tf.constant(
[[0, 0, 16, 16],
@@ -793,10 +882,11 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
'num_proposals': num_proposals,
'mask_predictions': mask_predictions_logits
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list)
loss_dict = model.loss(prediction_dict)
loss_dict = model.loss(prediction_dict, true_image_shapes)
with self.test_session() as sess:
loss_dict_out = sess.run(loss_dict)
@@ -808,7 +898,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
def test_loss_full_zero_padded_proposals(self):
model = self._build_model(
is_training=True, first_stage_only=False, second_stage_batch_size=6)
is_training=True, number_of_stages=2, second_stage_batch_size=6)
batch_size = 1
anchors = tf.constant(
[[0, 0, 16, 16],
@@ -880,10 +970,11 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
'num_proposals': num_proposals,
'mask_predictions': mask_predictions_logits
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list)
loss_dict = model.loss(prediction_dict)
loss_dict = model.loss(prediction_dict, true_image_shapes)
with self.test_session() as sess:
loss_dict_out = sess.run(loss_dict)
@@ -895,7 +986,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
def test_loss_full_multiple_label_groundtruth(self):
model = self._build_model(
is_training=True, first_stage_only=False, second_stage_batch_size=6,
is_training=True, number_of_stages=2, second_stage_batch_size=6,
softmax_second_stage_classification_loss=False)
batch_size = 1
anchors = tf.constant(
@@ -975,10 +1066,11 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
'num_proposals': num_proposals,
'mask_predictions': mask_predictions_logits
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list)
loss_dict = model.loss(prediction_dict)
loss_dict = model.loss(prediction_dict, true_image_shapes)
with self.test_session() as sess:
loss_dict_out = sess.run(loss_dict)
@@ -990,7 +1082,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
def test_loss_full_zero_padded_proposals_nonzero_loss_with_two_images(self):
model = self._build_model(
is_training=True, first_stage_only=False, second_stage_batch_size=6)
is_training=True, number_of_stages=2, second_stage_batch_size=6)
batch_size = 2
anchors = tf.constant(
[[0, 0, 16, 16],
@@ -1074,9 +1166,10 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
'proposal_boxes': proposal_boxes,
'num_proposals': num_proposals
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
loss_dict = model.loss(prediction_dict)
loss_dict = model.loss(prediction_dict, true_image_shapes)
with self.test_session() as sess:
loss_dict_out = sess.run(loss_dict)
@@ -1089,7 +1182,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
def test_loss_with_hard_mining(self):
model = self._build_model(is_training=True,
first_stage_only=False,
number_of_stages=2,
second_stage_batch_size=None,
first_stage_max_proposals=6,
hard_mining=True)
@@ -1163,9 +1256,10 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
'proposal_boxes': proposal_boxes,
'num_proposals': num_proposals
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
loss_dict = model.loss(prediction_dict)
loss_dict = model.loss(prediction_dict, true_image_shapes)
with self.test_session() as sess:
loss_dict_out = sess.run(loss_dict)
@@ -1185,7 +1279,7 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session() as sess:
with self.test_session(graph=test_graph_classification) as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
@@ -1194,64 +1288,89 @@ class FasterRCNNMetaArchTestBase(tf.test.TestCase):
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model = self._build_model(
is_training=False, first_stage_only=False, second_stage_batch_size=6)
is_training=False, number_of_stages=2, second_stage_batch_size=6)
inputs_shape = (2, 20, 20, 3)
inputs = tf.to_float(tf.random_uniform(
inputs_shape, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs)
model.postprocess(prediction_dict)
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
var_map = model.restore_map(from_detection_checkpoint=False)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
with self.test_session() as sess:
      with self.test_session(graph=test_graph_detection) as sess:
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn(model.first_stage_feature_extractor_scope, var.name)
self.assertNotIn(model.second_stage_feature_extractor_scope,
var.name)
self.assertNotIn(model.first_stage_feature_extractor_scope, var)
self.assertNotIn(model.second_stage_feature_extractor_scope, var)
def test_restore_map_for_detection_ckpt(self):
# Define first detection graph and save variables.
test_graph_detection1 = tf.Graph()
with test_graph_detection1.as_default():
model = self._build_model(
is_training=False, first_stage_only=False, second_stage_batch_size=6)
is_training=False, number_of_stages=2, second_stage_batch_size=6)
inputs_shape = (2, 20, 20, 3)
inputs = tf.to_float(tf.random_uniform(
inputs_shape, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs)
model.postprocess(prediction_dict)
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session() as sess:
with self.test_session(graph=test_graph_detection1) as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
# Define second detection graph and restore variables.
test_graph_detection2 = tf.Graph()
with test_graph_detection2.as_default():
model2 = self._build_model(is_training=False, first_stage_only=False,
model2 = self._build_model(is_training=False, number_of_stages=2,
second_stage_batch_size=6, num_classes=42)
inputs_shape2 = (2, 20, 20, 3)
inputs2 = tf.to_float(tf.random_uniform(
inputs_shape2, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs2 = model2.preprocess(inputs2)
prediction_dict2 = model2.predict(preprocessed_inputs2)
model2.postprocess(prediction_dict2)
preprocessed_inputs2, true_image_shapes = model2.preprocess(inputs2)
prediction_dict2 = model2.predict(preprocessed_inputs2, true_image_shapes)
model2.postprocess(prediction_dict2, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model2.restore_map(from_detection_checkpoint=True)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
with self.test_session() as sess:
with self.test_session(graph=test_graph_detection2) as sess:
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn(model2.first_stage_feature_extractor_scope, var.name)
self.assertNotIn(model2.second_stage_feature_extractor_scope,
var.name)
uninitialized_vars_list = sess.run(tf.report_uninitialized_variables())
self.assertIn('another_variable', uninitialized_vars_list)
for var in uninitialized_vars_list:
self.assertNotIn(model2.first_stage_feature_extractor_scope, var)
self.assertNotIn(model2.second_stage_feature_extractor_scope, var)
def test_load_all_det_checkpoint_vars(self):
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model = self._build_model(
is_training=False,
number_of_stages=2,
second_stage_batch_size=6,
num_classes=42)
inputs_shape = (2, 20, 20, 3)
inputs = tf.to_float(
tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(
from_detection_checkpoint=True,
load_all_detection_checkpoint_vars=True)
self.assertIsInstance(var_map, dict)
self.assertIn('another_variable', var_map)
if __name__ == '__main__':
tf.test.main()
@@ -21,8 +21,8 @@ The R-FCN meta architecture is similar to Faster R-CNN and only differs in the
second stage. Hence this class inherits FasterRCNNMetaArch and overrides only
the `_predict_second_stage` method.
Similar to Faster R-CNN we allow for two modes: first_stage_only=True and
first_stage_only=False. In the former setting, all of the user facing methods
Similar to Faster R-CNN we allow for two modes: number_of_stages=1 and
number_of_stages=2. In the former setting, all of the user facing methods
(e.g., predict, postprocess, loss) can be used as if the model consisted
only of the RPN, returning class agnostic proposals (these can be thought of as
approximate detections with no associated class information). In the latter
@@ -53,7 +53,7 @@ class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
num_classes,
image_resizer_fn,
feature_extractor,
first_stage_only,
number_of_stages,
first_stage_anchor_generator,
first_stage_atrous_rate,
first_stage_box_predictor_arg_scope,
@@ -75,7 +75,8 @@ class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
second_stage_classification_loss_weight,
second_stage_classification_loss,
hard_example_miner,
parallel_iterations=16):
parallel_iterations=16,
add_summaries=True):
"""RFCNMetaArch Constructor.
Args:
@@ -90,8 +91,8 @@ class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
returns a rank-3 image tensor, possibly with new spatial dimensions.
See builders/image_resizer_builder.py.
feature_extractor: A FasterRCNNFeatureExtractor object.
first_stage_only: Whether to construct only the Region Proposal Network
(RPN) part of the model.
number_of_stages: Valid values are {1, 2}. If 1 will only construct the
Region Proposal Network (RPN) part of the model.
first_stage_anchor_generator: An anchor_generator.AnchorGenerator object
(note that currently we only support
grid_anchor_generator.GridAnchorGenerator objects)
@@ -155,17 +156,22 @@ class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
hard_example_miner: A losses.HardExampleMiner object (can be None).
parallel_iterations: (Optional) The number of iterations allowed to run
in parallel for calls to tf.map_fn.
      add_summaries: boolean (default: True) controlling whether summary ops
        should be added to the tensorflow graph.
Raises:
ValueError: If `second_stage_batch_size` > `first_stage_max_proposals`
ValueError: If first_stage_anchor_generator is not of type
grid_anchor_generator.GridAnchorGenerator.
"""
# TODO: add_summaries is currently unused. Respect that directive
# in the future.
super(RFCNMetaArch, self).__init__(
is_training,
num_classes,
image_resizer_fn,
feature_extractor,
first_stage_only,
number_of_stages,
first_stage_anchor_generator,
first_stage_atrous_rate,
first_stage_box_predictor_arg_scope,
@@ -199,14 +205,15 @@ class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
rpn_objectness_predictions_with_background,
rpn_features,
anchors,
image_shape):
"""Predicts the output tensors from 2nd stage of FasterRCNN.
image_shape,
true_image_shapes):
"""Predicts the output tensors from 2nd stage of R-FCN.
Args:
rpn_box_encodings: 4-D float tensor of shape
rpn_box_encodings: 3-D float tensor of shape
[batch_size, num_valid_anchors, self._box_coder.code_size] containing
predicted boxes.
rpn_objectness_predictions_with_background: 2-D float tensor of shape
rpn_objectness_predictions_with_background: 3-D float tensor of shape
[batch_size, num_valid_anchors, 2] containing class
predictions (logits) for each of the anchors. Note that this
tensor *includes* background class predictions (at class index 0).
@@ -216,6 +223,10 @@ class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
anchors: 2-D float tensor of shape
[num_anchors, self._box_coder.code_size].
      image_shape: A 1-D int32 tensor of size [4] containing the image shape.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
@@ -223,7 +234,7 @@ class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
[total_num_proposals, num_classes, 4] representing predicted
(final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals
2) class_predictions_with_background: a 3-D tensor with shape
2) class_predictions_with_background: a 2-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
@@ -247,9 +258,11 @@ class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
[batch_size, feature_map_height, feature_map_width, depth],
representing the box classifier features.
"""
image_shape_2d = tf.tile(tf.expand_dims(image_shape[1:], 0),
[image_shape[0], 1])
proposal_boxes_normalized, _, num_proposals = self._postprocess_rpn(
rpn_box_encodings, rpn_objectness_predictions_with_background,
anchors, image_shape)
anchors, image_shape_2d, true_image_shapes)
box_classifier_features = (
self._feature_extractor.extract_box_classifier_features(
@@ -257,8 +270,8 @@ class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
scope=self.second_stage_feature_extractor_scope))
box_predictions = self._rfcn_box_predictor.predict(
box_classifier_features,
num_predictions_per_location=1,
[box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
proposal_boxes=proposal_boxes_normalized)
refined_box_encodings = tf.squeeze(
@@ -23,10 +23,10 @@ import re
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_predictor as bpredictor
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils
@@ -43,7 +43,9 @@ class SSDFeatureExtractor(object):
pad_to_multiple,
conv_hyperparams,
batch_norm_trainable=True,
reuse_weights=None):
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False):
"""Constructor.
Args:
@@ -58,6 +60,9 @@ class SSDFeatureExtractor(object):
(e.g. 1), it is desirable to disable batch norm update and use
pretrained batch norm params.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
"""
self._is_training = is_training
self._depth_multiplier = depth_multiplier
@@ -66,6 +71,8 @@ class SSDFeatureExtractor(object):
self._conv_hyperparams = conv_hyperparams
self._batch_norm_trainable = batch_norm_trainable
self._reuse_weights = reuse_weights
self._use_explicit_padding = use_explicit_padding
self._use_depthwise = use_depthwise
@abstractmethod
def preprocess(self, resized_inputs):
@@ -78,6 +85,10 @@ class SSDFeatureExtractor(object):
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
"""
pass
@@ -122,9 +133,9 @@ class SSDMetaArch(model.DetectionModel):
add_summaries=True):
"""SSDMetaArch Constructor.
TODO: group NMS parameters + score converter into a class and loss
parameters into a class and write config protos for postprocessing
and losses.
TODO(rathodv,jonathanhuang): group NMS parameters + score converter into
a class and loss parameters into a class and write config protos for
postprocessing and losses.
Args:
is_training: A boolean indicating whether the training version of the
@@ -138,7 +149,9 @@ class SSDMetaArch(model.DetectionModel):
region_similarity_calculator.RegionSimilarityCalculator object.
image_resizer_fn: a callable for image resizing. This callable always
takes a rank-3 image tensor (corresponding to a single image) and
returns a rank-3 image tensor, possibly with new spatial dimensions.
        returns a rank-3 image tensor, possibly with new spatial dimensions,
        and a 1-D tensor of shape [3] indicating the shape of the true image
        within the resized image tensor, as the resized image tensor could be
        padded.
See builders/image_resizer_builder.py.
non_max_suppression_fn: batch_multiclass_non_max_suppression
callable that takes `boxes`, `scores` and optional `clip_window`
@@ -174,14 +187,14 @@ class SSDMetaArch(model.DetectionModel):
self._matcher = matcher
self._region_similarity_calculator = region_similarity_calculator
# TODO: handle agnostic mode and positive/negative class weights
# TODO: handle agnostic mode and positive/negative class
# weights
unmatched_cls_target = None
unmatched_cls_target = tf.constant([1] + self.num_classes * [0], tf.float32)
self._target_assigner = target_assigner.TargetAssigner(
self._region_similarity_calculator,
self._matcher,
self._box_coder,
positive_class_weight=1.0,
negative_class_weight=1.0,
unmatched_cls_target=unmatched_cls_target)
@@ -210,7 +223,9 @@ class SSDMetaArch(model.DetectionModel):
def preprocess(self, inputs):
"""Feature-extractor specific preprocessing.
See base class.
SSD meta architecture uses a default clip_window of [0, 0, 1, 1] during
post-processing. On calling `preprocess` method, clip_window gets updated
based on `true_image_shapes` returned by `image_resizer_fn`.
Args:
inputs: a [batch, height_in, width_in, channels] float tensor representing
@@ -219,20 +234,69 @@ class SSDMetaArch(model.DetectionModel):
Returns:
preprocessed_inputs: a [batch, height_out, width_out, channels] float
tensor representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Raises:
ValueError: if inputs tensor does not have type tf.float32
"""
if inputs.dtype is not tf.float32:
raise ValueError('`preprocess` expects a tf.float32 tensor')
with tf.name_scope('Preprocessor'):
# TODO: revisit whether to always use batch size as the number of parallel
# iterations vs allow for dynamic batching.
resized_inputs = tf.map_fn(self._image_resizer_fn,
# TODO: revisit whether to always use batch size as
# the number of parallel iterations vs allow for dynamic batching.
outputs = shape_utils.static_or_dynamic_map_fn(
self._image_resizer_fn,
elems=inputs,
dtype=tf.float32)
return self._feature_extractor.preprocess(resized_inputs)
dtype=[tf.float32, tf.int32])
resized_inputs = outputs[0]
true_image_shapes = outputs[1]
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
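  # A minimal usage sketch (hypothetical shapes, not part of this change): if
  # image_resizer_fn pads a 100x150 input up to 200x200, `preprocess` returns
  # a [batch, 200, 200, 3] tensor together with
  # true_image_shapes == [[100, 150, 3], ...]; postprocess later uses these
  # shapes to clip detections to the unpadded region.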
def _compute_clip_window(self, preprocessed_images, true_image_shapes):
"""Computes clip window to use during post_processing.
def predict(self, preprocessed_inputs):
    Computes a new clip window to use during post-processing based on the
    resized image shapes and `true_image_shapes`, but only if the `preprocess`
    method has been called. Otherwise returns a default clip window of
    [0, 0, 1, 1].
Args:
preprocessed_images: the [batch, height, width, channels] image
tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros. Or None if the clip window should cover the full image.
Returns:
a 2-D float32 tensor of the form [batch_size, 4] containing the clip
window for each image in the batch in normalized coordinates (relative to
the resized dimensions) where each clip window is of the form [ymin, xmin,
ymax, xmax] or a default clip window of [0, 0, 1, 1].
"""
if true_image_shapes is None:
return tf.constant([0, 0, 1, 1], dtype=tf.float32)
resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_images)
true_heights, true_widths, _ = tf.unstack(
tf.to_float(true_image_shapes), axis=1)
padded_height = tf.to_float(resized_inputs_shape[1])
padded_width = tf.to_float(resized_inputs_shape[2])
return tf.stack(
[
tf.zeros_like(true_heights),
tf.zeros_like(true_widths), true_heights / padded_height,
true_widths / padded_width
],
axis=1)
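    # Worked example (hypothetical numbers): a 100x150 true image padded to
    # 200x200 yields a clip window of [0, 0, 100/200, 150/200] ==
    # [0.0, 0.0, 0.5, 0.75] in normalized coordinates, so boxes falling in
    # the zero-padded margin are discarded during NMS.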
def predict(self, preprocessed_inputs, true_image_shapes):
"""Predicts unpostprocessed tensors from input tensor.
This function takes an input batch of images and runs it through the forward
@@ -244,18 +308,24 @@ class SSDMetaArch(model.DetectionModel):
Args:
preprocessed_inputs: a [batch, height, width, channels] image tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) box_encodings: 4-D float tensor of shape [batch_size, num_anchors,
1) preprocessed_inputs: the [batch, height, width, channels] image
tensor.
2) box_encodings: 4-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
3) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions (at class index 0).
3) feature_maps: a list of tensors where the ith tensor has shape
4) feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i].
4) anchors: 2-D float tensor of shape [num_anchors, 4] containing
5) anchors: 2-D float tensor of shape [num_anchors, 4] containing
the generated anchors in normalized coordinates.
"""
with tf.variable_scope(None, self._extract_features_scope,
@@ -263,14 +333,19 @@ class SSDMetaArch(model.DetectionModel):
feature_maps = self._feature_extractor.extract_features(
preprocessed_inputs)
feature_map_spatial_dims = self._get_feature_map_spatial_dims(feature_maps)
image_shape = tf.shape(preprocessed_inputs)
image_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_inputs)
self._anchors = self._anchor_generator.generate(
feature_map_spatial_dims,
im_height=image_shape[1],
im_width=image_shape[2])
(box_encodings, class_predictions_with_background
) = self._add_box_predictions_to_feature_maps(feature_maps)
prediction_dict = self._box_predictor.predict(
feature_maps, self._anchor_generator.num_anchors_per_location())
box_encodings = tf.squeeze(prediction_dict['box_encodings'], axis=2)
class_predictions_with_background = prediction_dict[
'class_predictions_with_background']
predictions_dict = {
'preprocessed_inputs': preprocessed_inputs,
'box_encodings': box_encodings,
'class_predictions_with_background': class_predictions_with_background,
'feature_maps': feature_maps,
@@ -278,68 +353,6 @@ class SSDMetaArch(model.DetectionModel):
}
return predictions_dict
def _add_box_predictions_to_feature_maps(self, feature_maps):
"""Adds box predictors to each feature map and returns concatenated results.
Args:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
Returns:
box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions (at class index 0).
Raises:
RuntimeError: if the number of feature maps extracted via the
extract_features method does not match the length of the
num_anchors_per_locations list that was passed to the constructor.
RuntimeError: if box_encodings from the box_predictor does not have
shape of the form [batch_size, num_anchors, 1, code_size].
"""
num_anchors_per_location_list = (
self._anchor_generator.num_anchors_per_location())
if len(feature_maps) != len(num_anchors_per_location_list):
raise RuntimeError('the number of feature maps must match the '
'length of self.anchors.NumAnchorsPerLocation().')
box_encodings_list = []
cls_predictions_with_background_list = []
for idx, (feature_map, num_anchors_per_location
) in enumerate(zip(feature_maps, num_anchors_per_location_list)):
box_predictor_scope = 'BoxPredictor_{}'.format(idx)
box_predictions = self._box_predictor.predict(feature_map,
num_anchors_per_location,
box_predictor_scope)
box_encodings = box_predictions[bpredictor.BOX_ENCODINGS]
cls_predictions_with_background = box_predictions[
bpredictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
box_encodings_shape = box_encodings.get_shape().as_list()
if len(box_encodings_shape) != 4 or box_encodings_shape[2] != 1:
raise RuntimeError('box_encodings from the box_predictor must be of '
'shape `[batch_size, num_anchors, 1, code_size]`; '
'actual shape', box_encodings_shape)
box_encodings = tf.squeeze(box_encodings, axis=2)
box_encodings_list.append(box_encodings)
cls_predictions_with_background_list.append(
cls_predictions_with_background)
num_predictions = sum(
[tf.shape(box_encodings)[1] for box_encodings in box_encodings_list])
num_anchors = self.anchors.num_boxes()
anchors_assert = tf.assert_equal(num_anchors, num_predictions, [
'Mismatch: number of anchors vs number of predictions', num_anchors,
num_predictions
])
with tf.control_dependencies([anchors_assert]):
box_encodings = tf.concat(box_encodings_list, 1)
class_predictions_with_background = tf.concat(
cls_predictions_with_background_list, 1)
return box_encodings, class_predictions_with_background
def _get_feature_map_spatial_dims(self, feature_maps):
"""Return list of spatial dimensions for each feature map in a list.
@@ -356,7 +369,7 @@ class SSDMetaArch(model.DetectionModel):
]
return [(shape[1], shape[2]) for shape in feature_map_shapes]
def postprocess(self, prediction_dict):
def postprocess(self, prediction_dict, true_image_shapes):
"""Converts prediction tensors to final detections.
This function converts raw predictions tensors to final detection results by
@@ -370,12 +383,18 @@ class SSDMetaArch(model.DetectionModel):
Args:
prediction_dict: a dictionary holding prediction tensors with
1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
1) preprocessed_inputs: a [batch, height, width, channels] image
tensor.
2) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
3) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros. Or None, if the clip window should cover the full image.
Returns:
detections: a dictionary containing the following fields
@@ -393,18 +412,18 @@ class SSDMetaArch(model.DetectionModel):
'class_predictions_with_background' not in prediction_dict):
raise ValueError('prediction_dict does not contain expected entries.')
with tf.name_scope('Postprocessor'):
preprocessed_images = prediction_dict['preprocessed_inputs']
box_encodings = prediction_dict['box_encodings']
class_predictions = prediction_dict['class_predictions_with_background']
detection_boxes, detection_keypoints = self._batch_decode(box_encodings)
detection_boxes = tf.expand_dims(detection_boxes, axis=2)
class_predictions_without_background = tf.slice(class_predictions,
[0, 0, 1],
detection_scores_with_background = self._score_conversion_fn(
class_predictions)
detection_scores = tf.slice(detection_scores_with_background, [0, 0, 1],
[-1, -1, -1])
detection_scores = self._score_conversion_fn(
class_predictions_without_background)
clip_window = tf.constant([0, 0, 1, 1], tf.float32)
additional_fields = None
if detection_keypoints is not None:
additional_fields = {
fields.BoxListFields.keypoints: detection_keypoints}
@@ -412,19 +431,23 @@ class SSDMetaArch(model.DetectionModel):
num_detections) = self._non_max_suppression_fn(
detection_boxes,
detection_scores,
clip_window=clip_window,
clip_window=self._compute_clip_window(
preprocessed_images, true_image_shapes),
additional_fields=additional_fields)
detection_dict = {'detection_boxes': nmsed_boxes,
'detection_scores': nmsed_scores,
'detection_classes': nmsed_classes,
'num_detections': tf.to_float(num_detections)}
detection_dict = {
fields.DetectionResultFields.detection_boxes: nmsed_boxes,
fields.DetectionResultFields.detection_scores: nmsed_scores,
fields.DetectionResultFields.detection_classes: nmsed_classes,
fields.DetectionResultFields.num_detections:
tf.to_float(num_detections)
}
if (nmsed_additional_fields is not None and
fields.BoxListFields.keypoints in nmsed_additional_fields):
detection_dict['detection_keypoints'] = nmsed_additional_fields[
fields.BoxListFields.keypoints]
detection_dict[fields.DetectionResultFields.detection_keypoints] = (
nmsed_additional_fields[fields.BoxListFields.keypoints])
return detection_dict
def loss(self, prediction_dict, scope=None):
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
@@ -438,6 +461,10 @@ class SSDMetaArch(model.DetectionModel):
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
scope: Optional scope name.
Returns:
@@ -449,25 +476,28 @@ class SSDMetaArch(model.DetectionModel):
keypoints = None
if self.groundtruth_has_field(fields.BoxListFields.keypoints):
keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints)
weights = None
if self.groundtruth_has_field(fields.BoxListFields.weights):
weights = self.groundtruth_lists(fields.BoxListFields.weights)
(batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, match_list) = self._assign_targets(
self.groundtruth_lists(fields.BoxListFields.boxes),
self.groundtruth_lists(fields.BoxListFields.classes),
keypoints)
keypoints, weights)
if self._add_summaries:
self._summarize_input(
self.groundtruth_lists(fields.BoxListFields.boxes), match_list)
num_matches = tf.stack(
[match.num_matched_columns() for match in match_list])
location_losses = self._localization_loss(
prediction_dict['box_encodings'],
batch_reg_targets,
ignore_nan_targets=True,
weights=batch_reg_weights)
cls_losses = self._classification_loss(
cls_losses = ops.reduce_sum_trailing_dimensions(
self._classification_loss(
prediction_dict['class_predictions_with_background'],
batch_cls_targets,
weights=batch_cls_weights)
weights=batch_cls_weights),
ndims=2)
if self._hard_example_miner:
(localization_loss, classification_loss) = self._apply_hard_mining(
@@ -487,7 +517,8 @@ class SSDMetaArch(model.DetectionModel):
# Optionally normalize by number of positive matches
normalizer = tf.constant(1.0, dtype=tf.float32)
if self._normalize_loss_by_num_matches:
normalizer = tf.maximum(tf.to_float(tf.reduce_sum(num_matches)), 1.0)
normalizer = tf.maximum(tf.to_float(tf.reduce_sum(batch_reg_weights)),
1.0)
with tf.name_scope('localization_loss'):
localization_loss = ((self._localization_loss_weight / normalizer) *
@@ -515,7 +546,8 @@ class SSDMetaArch(model.DetectionModel):
'NegativeAnchorLossCDF')
def _assign_targets(self, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_keypoints_list=None):
groundtruth_keypoints_list=None,
groundtruth_weights_list=None):
"""Assign groundtruth targets.
Adds a background class to each one-hot encoding of groundtruth classes
@@ -532,6 +564,8 @@ class SSDMetaArch(model.DetectionModel):
index assumed to map to the first non-background class.
groundtruth_keypoints_list: (optional) a list of 3-D tensors of shape
[num_boxes, num_keypoints, 2]
groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
@@ -558,7 +592,7 @@ class SSDMetaArch(model.DetectionModel):
boxlist.add_field(fields.BoxListFields.keypoints, keypoints)
return target_assigner.batch_assign_targets(
self._target_assigner, self.anchors, groundtruth_boxlists,
groundtruth_classes_with_background_list)
groundtruth_classes_with_background_list, groundtruth_weights_list)
def _summarize_input(self, groundtruth_boxes_list, match_list):
"""Creates tensorflow summaries for the input boxes and anchors.
@@ -675,7 +709,9 @@ class SSDMetaArch(model.DetectionModel):
[combined_shape[0], combined_shape[1], 4]))
return decoded_boxes, decoded_keypoints
def restore_map(self, from_detection_checkpoint=True):
def restore_map(self,
from_detection_checkpoint=True,
load_all_detection_checkpoint_vars=False):
"""Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
@@ -684,6 +720,9 @@ class SSDMetaArch(model.DetectionModel):
from_detection_checkpoint: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
load_all_detection_checkpoint_vars: whether to load all variables (when
`from_detection_checkpoint` is True). If False, only variables within
the appropriate scopes are included. Default False.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
@@ -691,10 +730,15 @@ class SSDMetaArch(model.DetectionModel):
"""
variables_to_restore = {}
for variable in tf.global_variables():
if variable.op.name.startswith(self._extract_features_scope):
var_name = variable.op.name
if from_detection_checkpoint and load_all_detection_checkpoint_vars:
variables_to_restore[var_name] = variable
else:
if var_name.startswith(self._extract_features_scope):
if not from_detection_checkpoint:
var_name = (re.split('^' + self._extract_features_scope + '/',
var_name = (
re.split('^' + self._extract_features_scope + '/',
var_name)[-1])
variables_to_restore[var_name] = variable
return variables_to_restore
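    # Illustrative mapping (assuming the default scope name
    # 'FeatureExtractor' and a hypothetical inner scope 'MobilenetV1'): with
    # from_detection_checkpoint=False, the variable
    # 'FeatureExtractor/MobilenetV1/Conv2d_0/weights' is keyed as
    # 'MobilenetV1/Conv2d_0/weights' to match classification checkpoint
    # naming.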
@@ -24,6 +24,7 @@ from object_detection.core import losses
from object_detection.core import post_processing
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.utils import test_case
from object_detection.utils import test_utils
slim = tf.contrib.slim
@@ -46,7 +47,7 @@ class FakeSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
def extract_features(self, preprocessed_inputs):
with tf.variable_scope('mock_model'):
features = slim.conv2d(inputs=preprocessed_inputs, num_outputs=32,
kernel_size=[1, 1], scope='layer1')
kernel_size=1, scope='layer1')
return [features]
@@ -64,37 +65,31 @@ class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator):
tf.constant([[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[.5, .5, 1, 1]], tf.float32))
[1., 1., 1.5, 1.5] # Anchor that is outside clip_window.
], tf.float32))
def num_anchors(self):
return 4
class SsdMetaArchTest(tf.test.TestCase):
def setUp(self):
"""Set up mock SSD model.
class SsdMetaArchTest(test_case.TestCase):
Here we set up a simple mock SSD model that will always predict 4
detections that happen to always be exactly the anchors that are set up
in the above MockAnchorGenerator. Because we let max_detections=5,
we will also always end up with an extra padded row in the detection
results.
"""
def _create_model(self, apply_hard_mining=True):
is_training = False
self._num_classes = 1
num_classes = 1
mock_anchor_generator = MockAnchorGenerator2x2()
mock_box_predictor = test_utils.MockBoxPredictor(
is_training, self._num_classes)
is_training, num_classes)
mock_box_coder = test_utils.MockBoxCoder()
fake_feature_extractor = FakeSSDFeatureExtractor()
mock_matcher = test_utils.MockMatcher()
region_similarity_calculator = sim_calc.IouSimilarity()
def image_resizer_fn(image):
return tf.identity(image)
return [tf.identity(image), tf.shape(image)]
classification_loss = losses.WeightedSigmoidClassificationLoss(
anchorwise_output=True)
localization_loss = losses.WeightedSmoothL1LocalizationLoss(
anchorwise_output=True)
classification_loss = losses.WeightedSigmoidClassificationLoss()
localization_loss = losses.WeightedSmoothL1LocalizationLoss()
non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=-20.0,
@@ -105,48 +100,56 @@ class SsdMetaArchTest(tf.test.TestCase):
localization_loss_weight = 1.0
normalize_loss_by_num_matches = False
hard_example_miner = None
if apply_hard_mining:
# This hard example miner is expected to be a no-op.
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=None,
iou_threshold=1.0)
self._num_anchors = 4
self._code_size = 4
self._model = ssd_meta_arch.SSDMetaArch(
code_size = 4
model = ssd_meta_arch.SSDMetaArch(
is_training, mock_anchor_generator, mock_box_predictor, mock_box_coder,
fake_feature_extractor, mock_matcher, region_similarity_calculator,
image_resizer_fn, non_max_suppression_fn, tf.identity,
classification_loss, localization_loss, classification_loss_weight,
localization_loss_weight, normalize_loss_by_num_matches,
hard_example_miner)
hard_example_miner, add_summaries=False)
return model, num_classes, mock_anchor_generator.num_anchors(), code_size
def test_preprocess_preserves_input_shapes(self):
def test_preprocess_preserves_shapes_with_dynamic_input_image(self):
image_shapes = [(3, None, None, 3),
(None, 10, 10, 3),
(None, None, None, 3)]
model, _, _, _ = self._create_model()
for image_shape in image_shapes:
image_placeholder = tf.placeholder(tf.float32, shape=image_shape)
preprocessed_inputs = self._model.preprocess(image_placeholder)
preprocessed_inputs, _ = model.preprocess(image_placeholder)
self.assertAllEqual(preprocessed_inputs.shape.as_list(), image_shape)
def test_predict_results_have_correct_keys_and_shapes(self):
def test_preprocess_preserves_shape_with_static_input_image(self):
def graph_fn(input_image):
model, _, _, _ = self._create_model()
return model.preprocess(input_image)
input_image = np.random.rand(2, 3, 3, 3).astype(np.float32)
preprocessed_inputs, _ = self.execute(graph_fn, [input_image])
self.assertAllEqual(preprocessed_inputs.shape, [2, 3, 3, 3])
def test_predict_result_shapes_on_image_with_dynamic_shape(self):
batch_size = 3
image_size = 2
input_shapes = [(batch_size, image_size, image_size, 3),
(None, image_size, image_size, 3),
input_shapes = [(None, image_size, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
expected_box_encodings_shape_out = (
batch_size, self._num_anchors, self._code_size)
expected_class_predictions_with_background_shape_out = (
batch_size, self._num_anchors, self._num_classes+1)
for input_shape in input_shapes:
tf_graph = tf.Graph()
with tf_graph.as_default():
model, num_classes, num_anchors, code_size = self._create_model()
preprocessed_input_placeholder = tf.placeholder(tf.float32,
shape=input_shape)
prediction_dict = self._model.predict(preprocessed_input_placeholder)
prediction_dict = model.predict(
preprocessed_input_placeholder, true_image_shapes=None)
self.assertTrue('box_encodings' in prediction_dict)
self.assertTrue('class_predictions_with_background' in prediction_dict)
@@ -161,12 +164,42 @@ class SsdMetaArchTest(tf.test.TestCase):
preprocessed_input_placeholder:
np.random.uniform(
size=(batch_size, 2, 2, 3))})
expected_box_encodings_shape_out = (batch_size, num_anchors, code_size)
expected_class_predictions_with_background_shape_out = (batch_size,
num_anchors,
num_classes + 1)
self.assertAllEqual(prediction_out['box_encodings'].shape,
expected_box_encodings_shape_out)
self.assertAllEqual(
prediction_out['class_predictions_with_background'].shape,
expected_class_predictions_with_background_shape_out)
def test_predict_result_shapes_on_image_with_static_shape(self):
with tf.Graph().as_default():
_, num_classes, num_anchors, code_size = self._create_model()
def graph_fn(input_image):
model, _, _, _ = self._create_model()
predictions = model.predict(input_image, true_image_shapes=None)
return (predictions['box_encodings'],
predictions['class_predictions_with_background'],
predictions['feature_maps'],
predictions['anchors'])
batch_size = 3
image_size = 2
channels = 3
input_image = np.random.rand(batch_size, image_size, image_size,
channels).astype(np.float32)
expected_box_encodings_shape = (batch_size, num_anchors, code_size)
expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1)
(box_encodings, class_predictions, _, _) = self.execute(graph_fn,
[input_image])
self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape)
self.assertAllEqual(class_predictions.shape,
expected_class_predictions_shape)
def test_postprocess_results_are_correct(self):
batch_size = 2
image_size = 2
@@ -178,26 +211,30 @@ class SsdMetaArchTest(tf.test.TestCase):
expected_boxes = np.array([[[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[.5, .5, 1, 1],
[0, 0, 0, 0]],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]], # padding
[[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[.5, .5, 1, 1],
[0, 0, 0, 0]]])
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]] # padding
])
expected_scores = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
expected_classes = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
expected_num_detections = np.array([4, 4])
expected_num_detections = np.array([3, 3])
for input_shape in input_shapes:
tf_graph = tf.Graph()
with tf_graph.as_default():
preprocessed_input_placeholder = tf.placeholder(tf.float32,
shape=input_shape)
prediction_dict = self._model.predict(preprocessed_input_placeholder)
detections = self._model.postprocess(prediction_dict)
model, _, _, _ = self._create_model()
input_placeholder = tf.placeholder(tf.float32, shape=input_shape)
preprocessed_inputs, true_image_shapes = model.preprocess(
input_placeholder)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
self.assertTrue('detection_boxes' in detections)
self.assertTrue('detection_scores' in detections)
self.assertTrue('detection_classes' in detections)
......@@ -207,7 +244,7 @@ class SsdMetaArchTest(tf.test.TestCase):
sess.run(init_op)
detections_out = sess.run(detections,
feed_dict={
preprocessed_input_placeholder:
input_placeholder:
np.random.uniform(
size=(batch_size, 2, 2, 3))})
self.assertAllClose(detections_out['detection_boxes'], expected_boxes)
@@ -217,47 +254,91 @@ class SsdMetaArchTest(tf.test.TestCase):
expected_num_detections)
def test_loss_results_are_correct(self):
batch_size = 2
preprocessed_input = tf.random_uniform((batch_size, 2, 2, 3),
dtype=tf.float32)
groundtruth_boxes_list = [tf.constant([[0, 0, .5, .5]], dtype=tf.float32),
tf.constant([[0, 0, .5, .5]], dtype=tf.float32)]
groundtruth_classes_list = [tf.constant([[1]], dtype=tf.float32),
tf.constant([[1]], dtype=tf.float32)]
self._model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = self._model.predict(preprocessed_input)
loss_dict = self._model.loss(prediction_dict)
self.assertTrue('localization_loss' in loss_dict)
self.assertTrue('classification_loss' in loss_dict)
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model()
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model(apply_hard_mining=False)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (loss_dict['localization_loss'], loss_dict['classification_loss'])
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
expected_classification_loss = (batch_size * num_anchors
* (num_classes+1) * np.log(2.0))
(localization_loss,
classification_loss) = self.execute(graph_fn, [preprocessed_input,
groundtruth_boxes1,
groundtruth_boxes2,
groundtruth_classes1,
groundtruth_classes2])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_loss_results_are_correct_with_hard_example_mining(self):
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model()
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model()
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (loss_dict['localization_loss'], loss_dict['classification_loss'])
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
expected_classification_loss = (batch_size * num_anchors
* (num_classes+1) * np.log(2.0))
(localization_loss, classification_loss) = self.execute_cpu(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_restore_map_for_detection_ckpt(self):
model, _, _, _ = self._create_model()
model.predict(tf.constant(np.array([[[0, 0], [1, 1]], [[1, 0], [0, 1]]],
dtype=np.float32)),
true_image_shapes=None)
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session() as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
var_map = model.restore_map(
from_detection_checkpoint=True,
load_all_detection_checkpoint_vars=False)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn('FeatureExtractor', var)
def test_restore_map_for_classification_ckpt(self):
# Define mock tensorflow classification graph and save variables.
@@ -271,7 +352,7 @@ class SsdMetaArchTest(tf.test.TestCase):
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session(graph=test_graph_classification) as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
@@ -279,19 +360,39 @@ class SsdMetaArchTest(tf.test.TestCase):
# classification checkpoint.
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model, _, _, _ = self._create_model()
inputs_shape = [2, 2, 2, 3]
inputs = tf.to_float(tf.random_uniform(
inputs_shape, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(from_detection_checkpoint=False)
self.assertNotIn('another_variable', var_map)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
with self.test_session(graph=test_graph_detection) as sess:
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn('FeatureExtractor', var)
def test_load_all_det_checkpoint_vars(self):
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model, _, _, _ = self._create_model()
inputs_shape = [2, 2, 2, 3]
inputs = tf.to_float(
tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(
from_detection_checkpoint=True,
load_all_detection_checkpoint_vars=True)
self.assertIsInstance(var_map, dict)
self.assertIn('another_variable', var_map)
if __name__ == '__main__':
tf.test.main()
@@ -8,6 +8,57 @@ licenses(["notice"])
# Apache 2.0
py_library(
name = "coco_tools",
srcs = [
"coco_tools.py",
],
deps = [
"//file/localfile",
"//file/placer",
"//pycocotools",
"//tensorflow",
"//tensorflow/models/research/object_detection/utils:json_utils",
],
)
py_test(
name = "coco_tools_test",
srcs = [
"coco_tools_test.py",
],
deps = [
":coco_tools",
"//testing/pybase",
"//numpy",
],
)
py_library(
name = "coco_evaluation",
srcs = [
"coco_evaluation.py",
],
deps = [
":coco_tools",
"//tensorflow",
"//tensorflow/models/research/object_detection/core:standard_fields",
"//tensorflow/models/research/object_detection/utils:object_detection_evaluation",
],
)
py_test(
name = "coco_evaluation_test",
srcs = [
"coco_evaluation_test.py",
],
deps = [
":coco_evaluation",
"//tensorflow",
"//tensorflow/models/research/object_detection/core:standard_fields",
],
)
py_binary(
name = "offline_eval_map_corloc",
srcs = [
@@ -15,11 +66,11 @@ py_binary(
],
deps = [
":tf_example_parser",
"//tensorflow_models/object_detection:evaluator",
"//tensorflow_models/object_detection/builders:input_reader_builder",
"//tensorflow_models/object_detection/core:standard_fields",
"//tensorflow_models/object_detection/utils:config_util",
"//tensorflow_models/object_detection/utils:label_map_util",
"//tensorflow/models/research/object_detection:evaluator",
"//tensorflow/models/research/object_detection/builders:input_reader_builder",
"//tensorflow/models/research/object_detection/core:standard_fields",
"//tensorflow/models/research/object_detection/utils:config_util",
"//tensorflow/models/research/object_detection/utils:label_map_util",
],
)
@@ -39,8 +90,8 @@ py_library(
srcs = ["tf_example_parser.py"],
deps = [
"//tensorflow",
"//tensorflow_models/object_detection/core:data_parser",
"//tensorflow_models/object_detection/core:standard_fields",
"//tensorflow/models/research/object_detection/core:data_parser",
"//tensorflow/models/research/object_detection/core:standard_fields",
],
)
@@ -50,6 +101,6 @@ py_test(
deps = [
":tf_example_parser",
"//tensorflow",
"//tensorflow_models/object_detection/core:standard_fields",
"//tensorflow/models/research/object_detection/core:standard_fields",
],
)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for evaluating object detections with COCO metrics."""
import numpy as np
import tensorflow as tf
from object_detection.core import standard_fields
from object_detection.metrics import coco_tools
from object_detection.utils import object_detection_evaluation
class CocoDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):
"""Class to evaluate COCO detection metrics."""
def __init__(self,
categories,
include_metrics_per_category=False,
all_metrics_per_category=False):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
include_metrics_per_category: If True, include metrics for each category.
all_metrics_per_category: Whether to include all the summary metrics for
each category in per_category_ap. Be careful with setting it to true if
you have more than a handful of categories, because it will pollute
your mldash.
"""
super(CocoDetectionEvaluator, self).__init__(categories)
# _image_ids is a dictionary that maps unique image ids to Booleans which
# indicate whether a corresponding detection has been added.
self._image_ids = {}
self._groundtruth_list = []
self._detection_boxes_list = []
self._category_id_set = set([cat['id'] for cat in self._categories])
self._annotation_id = 1
self._metrics = None
self._include_metrics_per_category = include_metrics_per_category
self._all_metrics_per_category = all_metrics_per_category
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._image_ids.clear()
self._groundtruth_list = []
self._detection_boxes_list = []
def add_single_ground_truth_image_info(self,
image_id,
groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
If the image has already been added, a warning is logged, and groundtruth is
ignored.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
InputDataFields.groundtruth_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
InputDataFields.groundtruth_classes: integer numpy array of shape
[num_boxes] containing 1-indexed groundtruth classes for the boxes.
"""
if image_id in self._image_ids:
tf.logging.warning('Ignoring ground truth with image id %s since it was '
'previously added', image_id)
return
self._groundtruth_list.extend(
coco_tools.
ExportSingleImageGroundtruthToCoco(
image_id=image_id,
next_annotation_id=self._annotation_id,
category_id_set=self._category_id_set,
groundtruth_boxes=groundtruth_dict[standard_fields.InputDataFields.
groundtruth_boxes],
groundtruth_classes=groundtruth_dict[standard_fields.
InputDataFields.
groundtruth_classes]))
self._annotation_id += groundtruth_dict[standard_fields.InputDataFields.
groundtruth_boxes].shape[0]
self._image_ids[image_id] = False
def add_single_detected_image_info(self,
image_id,
detections_dict):
"""Adds detections for a single image to be used for evaluation.
If a detection has already been added for this image id, a warning is
logged, and the detection is skipped.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
DetectionResultFields.detection_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` detection boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
DetectionResultFields.detection_scores: float32 numpy array of shape
[num_boxes] containing detection scores for the boxes.
DetectionResultFields.detection_classes: integer numpy array of shape
[num_boxes] containing 1-indexed detection classes for the boxes.
DetectionResultFields.detection_masks: optional uint8 numpy array of
shape [num_boxes, image_height, image_width] containing instance
masks for the boxes.
Raises:
ValueError: If groundtruth for the image_id is not available.
"""
if image_id not in self._image_ids:
raise ValueError('Missing groundtruth for image id: {}'.format(image_id))
if self._image_ids[image_id]:
tf.logging.warning('Ignoring detection with image id %s since it was '
'previously added', image_id)
return
self._detection_boxes_list.extend(
coco_tools.ExportSingleImageDetectionBoxesToCoco(
image_id=image_id,
category_id_set=self._category_id_set,
detection_boxes=detections_dict[standard_fields.
DetectionResultFields
.detection_boxes],
detection_scores=detections_dict[standard_fields.
DetectionResultFields.
detection_scores],
detection_classes=detections_dict[standard_fields.
DetectionResultFields.
detection_classes]))
self._image_ids[image_id] = True
def evaluate(self):
"""Evaluates the detection boxes and returns a dictionary of coco metrics.
Returns:
A dictionary holding -
1. summary_metrics:
'DetectionBoxes_Precision/mAP': mean average precision over classes
averaged over IOU thresholds ranging from .5 to .95 with .05
increments.
'DetectionBoxes_Precision/mAP@.50IOU': mean average precision at 50% IOU
'DetectionBoxes_Precision/mAP@.75IOU': mean average precision at 75% IOU
'DetectionBoxes_Precision/mAP (small)': mean average precision for small
objects (area < 32^2 pixels).
'DetectionBoxes_Precision/mAP (medium)': mean average precision for
medium sized objects (32^2 pixels < area < 96^2 pixels).
'DetectionBoxes_Precision/mAP (large)': mean average precision for large
objects (96^2 pixels < area < 10000^2 pixels).
'DetectionBoxes_Recall/AR@1': average recall with 1 detection.
'DetectionBoxes_Recall/AR@10': average recall with 10 detections.
'DetectionBoxes_Recall/AR@100': average recall with 100 detections.
'DetectionBoxes_Recall/AR@100 (small)': average recall for small objects
with 100 detections.
'DetectionBoxes_Recall/AR@100 (medium)': average recall for medium objects
with 100 detections.
'DetectionBoxes_Recall/AR@100 (large)': average recall for large objects
with 100 detections.
2. per_category_ap: if include_metrics_per_category is True, category
specific results with keys of the form:
'Precision mAP ByCategory/category' (without the supercategory part if
no supercategories exist). For backward compatibility
'PerformanceByCategory' is included in the output regardless of
all_metrics_per_category.
"""
groundtruth_dict = {
'annotations': self._groundtruth_list,
'images': [{'id': image_id} for image_id in self._image_ids],
'categories': self._categories
}
coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(
self._detection_boxes_list)
box_evaluator = coco_tools.COCOEvalWrapper(
coco_wrapped_groundtruth, coco_wrapped_detections, agnostic_mode=False)
box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics(
include_metrics_per_category=self._include_metrics_per_category,
all_metrics_per_category=self._all_metrics_per_category)
box_metrics.update(box_per_category_ap)
box_metrics = {'DetectionBoxes_'+ key: value
for key, value in box_metrics.iteritems()}
return box_metrics
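# Illustrative end-to-end sketch (the category, box and score values below
# are made up; boxes are [ymin, xmin, ymax, xmax] in absolute pixels):
#
#   evaluator = CocoDetectionEvaluator([{'id': 1, 'name': 'cat'}])
#   evaluator.add_single_ground_truth_image_info(
#       'image1',
#       {standard_fields.InputDataFields.groundtruth_boxes:
#            np.array([[10., 10., 50., 50.]], dtype=np.float32),
#        standard_fields.InputDataFields.groundtruth_classes: np.array([1])})
#   evaluator.add_single_detected_image_info(
#       'image1',
#       {standard_fields.DetectionResultFields.detection_boxes:
#            np.array([[10., 10., 50., 50.]], dtype=np.float32),
#        standard_fields.DetectionResultFields.detection_scores:
#            np.array([.9], dtype=np.float32),
#        standard_fields.DetectionResultFields.detection_classes:
#            np.array([1])})
#   metrics = evaluator.evaluate()
#   # e.g. metrics['DetectionBoxes_Precision/mAP'] -> 1.0 for this
#   # perfectly matching detection.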
def get_estimator_eval_metric_ops(self, image_id, groundtruth_boxes,
groundtruth_classes, detection_boxes,
detection_scores, detection_classes):
"""Returns a dictionary of eval metric ops to use with `tf.EstimatorSpec`.
Note that once value_op is called, the detections and groundtruth added via
update_op are cleared.
Args:
image_id: Unique string/integer identifier for the image.
groundtruth_boxes: float32 tensor of shape [num_boxes, 4] containing
`num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
groundtruth_classes: int32 tensor of shape [num_boxes] containing
1-indexed groundtruth classes for the boxes.
detection_boxes: float32 tensor of shape [num_boxes, 4] containing
`num_boxes` detection boxes of the format [ymin, xmin, ymax, xmax]
in absolute image coordinates.
detection_scores: float32 tensor of shape [num_boxes] containing
detection scores for the boxes.
detection_classes: int32 tensor of shape [num_boxes] containing
1-indexed detection classes for the boxes.
Returns:
a dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in tf.EstimatorSpec. Note that all update ops
must be run together and similarly all value ops must be run together to
guarantee correct behaviour.
"""
def update_op(
image_id,
groundtruth_boxes,
groundtruth_classes,
detection_boxes,
detection_scores,
detection_classes):
self.add_single_ground_truth_image_info(
image_id,
{'groundtruth_boxes': groundtruth_boxes,
'groundtruth_classes': groundtruth_classes})
self.add_single_detected_image_info(
image_id,
{'detection_boxes': detection_boxes,
'detection_scores': detection_scores,
'detection_classes': detection_classes})
update_op = tf.py_func(update_op, [image_id,
groundtruth_boxes,
groundtruth_classes,
detection_boxes,
detection_scores,
detection_classes], [])
metric_names = ['DetectionBoxes_Precision/mAP',
'DetectionBoxes_Precision/mAP@.50IOU',
'DetectionBoxes_Precision/mAP@.75IOU',
'DetectionBoxes_Precision/mAP (large)',
'DetectionBoxes_Precision/mAP (medium)',
'DetectionBoxes_Precision/mAP (small)',
'DetectionBoxes_Recall/AR@1',
'DetectionBoxes_Recall/AR@10',
'DetectionBoxes_Recall/AR@100',
'DetectionBoxes_Recall/AR@100 (large)',
'DetectionBoxes_Recall/AR@100 (medium)',
'DetectionBoxes_Recall/AR@100 (small)']
if self._include_metrics_per_category:
for category_dict in self._categories:
metric_names.append('DetectionBoxes_PerformanceByCategory/mAP/' +
category_dict['name'])
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
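# Hypothetical wiring sketch (the tensors and `total_loss` below are
# assumptions, not defined in this file): the returned ops plug into an
# Estimator's eval spec.
#
#   eval_metric_ops = evaluator.get_estimator_eval_metric_ops(
#       image_id, groundtruth_boxes, groundtruth_classes,
#       detection_boxes, detection_scores, detection_classes)
#   spec = tf.estimator.EstimatorSpec(
#       mode=tf.estimator.ModeKeys.EVAL, loss=total_loss,
#       eval_metric_ops=eval_metric_ops)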
def _check_mask_type_and_value(array_name, masks):
"""Checks whether mask dtype is uint8 anf the values are either 0 or 1."""
if masks.dtype != np.uint8:
raise ValueError('{} must be of type np.uint8. Found {}.'.format(
array_name, masks.dtype))
if np.any(np.logical_and(masks != 0, masks != 1)):
raise ValueError('{} elements can only be either 0 or 1.'.format(
array_name))
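# For example (illustrative), a mask containing a value other than 0 or 1
# fails the value check:
#
#   _check_mask_type_and_value('detection_masks',
#                              np.array([[0, 1], [1, 2]], dtype=np.uint8))
#   # raises ValueError: detection_masks elements can only be either 0 or 1.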
class CocoMaskEvaluator(object_detection_evaluation.DetectionEvaluator):
"""Class to evaluate COCO detection metrics."""
def __init__(self, categories, include_metrics_per_category=False):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
include_metrics_per_category: If True, include metrics for each category.
"""
super(CocoMaskEvaluator, self).__init__(categories)
self._image_id_to_mask_shape_map = {}
self._image_ids_with_detections = set([])
self._groundtruth_list = []
self._detection_masks_list = []
self._category_id_set = set([cat['id'] for cat in self._categories])
self._annotation_id = 1
self._include_metrics_per_category = include_metrics_per_category
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._image_id_to_mask_shape_map.clear()
self._image_ids_with_detections.clear()
self._groundtruth_list = []
self._detection_masks_list = []
def add_single_ground_truth_image_info(self,
image_id,
groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
InputDataFields.groundtruth_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
InputDataFields.groundtruth_classes: integer numpy array of shape
[num_boxes] containing 1-indexed groundtruth classes for the boxes.
InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape
[num_boxes, image_height, image_width] containing groundtruth masks
corresponding to the boxes. The elements of the array must be in
{0, 1}.
"""
if image_id in self._image_id_to_mask_shape_map:
tf.logging.warning('Ignoring ground truth with image id %s since it was '
'previously added', image_id)
return
groundtruth_instance_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
_check_mask_type_and_value(standard_fields.InputDataFields.
groundtruth_instance_masks,
groundtruth_instance_masks)
self._groundtruth_list.extend(
coco_tools.
ExportSingleImageGroundtruthToCoco(
image_id=image_id,
next_annotation_id=self._annotation_id,
category_id_set=self._category_id_set,
groundtruth_boxes=groundtruth_dict[standard_fields.InputDataFields.
groundtruth_boxes],
groundtruth_classes=groundtruth_dict[standard_fields.
InputDataFields.
groundtruth_classes],
groundtruth_masks=groundtruth_instance_masks))
self._annotation_id += groundtruth_dict[standard_fields.InputDataFields.
groundtruth_boxes].shape[0]
self._image_id_to_mask_shape_map[image_id] = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks].shape
def add_single_detected_image_info(self,
image_id,
detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
DetectionResultFields.detection_scores: float32 numpy array of shape
[num_boxes] containing detection scores for the boxes.
DetectionResultFields.detection_classes: integer numpy array of shape
[num_boxes] containing 1-indexed detection classes for the boxes.
DetectionResultFields.detection_masks: optional uint8 numpy array of
shape [num_boxes, image_height, image_width] containing instance
masks corresponding to the boxes. The elements of the array must be
in {0, 1}.
Raises:
ValueError: If groundtruth for the image_id is not available or if
spatial shapes of groundtruth_instance_masks and detection_masks are
incompatible.
"""
if image_id not in self._image_id_to_mask_shape_map:
raise ValueError('Missing groundtruth for image id: {}'.format(image_id))
if image_id in self._image_ids_with_detections:
tf.logging.warning('Ignoring detection with image id %s since it was '
'previously added', image_id)
return
groundtruth_masks_shape = self._image_id_to_mask_shape_map[image_id]
detection_masks = detections_dict[standard_fields.DetectionResultFields.
detection_masks]
if groundtruth_masks_shape[1:] != detection_masks.shape[1:]:
raise ValueError('Spatial shape of groundtruth masks and detection masks '
'are incompatible: {} vs {}'.format(
groundtruth_masks_shape,
detection_masks.shape))
_check_mask_type_and_value(standard_fields.DetectionResultFields.
detection_masks,
detection_masks)
self._detection_masks_list.extend(
coco_tools.ExportSingleImageDetectionMasksToCoco(
image_id=image_id,
category_id_set=self._category_id_set,
detection_masks=detection_masks,
detection_scores=detections_dict[standard_fields.
DetectionResultFields.
detection_scores],
detection_classes=detections_dict[standard_fields.
DetectionResultFields.
detection_classes]))
self._image_ids_with_detections.update([image_id])
def evaluate(self):
"""Evaluates the detection masks and returns a dictionary of coco metrics.
Returns:
A dictionary holding -
1. summary_metrics:
'Precision/mAP': mean average precision over classes averaged over IOU
thresholds ranging from .5 to .95 with .05 increments
'Precision/mAP@.50IOU': mean average precision at 50% IOU
'Precision/mAP@.75IOU': mean average precision at 75% IOU
'Precision/mAP (small)': mean average precision for small objects
(area < 32^2 pixels)
'Precision/mAP (medium)': mean average precision for medium sized
objects (32^2 pixels < area < 96^2 pixels)
'Precision/mAP (large)': mean average precision for large objects
(96^2 pixels < area < 10000^2 pixels)
'Recall/AR@1': average recall with 1 detection
'Recall/AR@10': average recall with 10 detections
'Recall/AR@100': average recall with 100 detections
'Recall/AR@100 (small)': average recall for small objects with 100
detections
'Recall/AR@100 (medium)': average recall for medium objects with 100
detections
'Recall/AR@100 (large)': average recall for large objects with 100
detections
2. per_category_ap: if include_metrics_per_category is True, category
specific results with keys of the form:
'Precision mAP ByCategory/category' (without the supercategory part if
no supercategories exist). For backward compatibility
'PerformanceByCategory' is included in the output regardless of
all_metrics_per_category.
"""
groundtruth_dict = {
'annotations': self._groundtruth_list,
'images': [{'id': image_id, 'height': shape[1], 'width': shape[2]}
for image_id, shape in self._image_id_to_mask_shape_map.
iteritems()],
'categories': self._categories
}
coco_wrapped_groundtruth = coco_tools.COCOWrapper(
groundtruth_dict, detection_type='segmentation')
coco_wrapped_detection_masks = coco_wrapped_groundtruth.LoadAnnotations(
self._detection_masks_list)
mask_evaluator = coco_tools.COCOEvalWrapper(
coco_wrapped_groundtruth, coco_wrapped_detection_masks,
agnostic_mode=False, iou_type='segm')
mask_metrics, mask_per_category_ap = mask_evaluator.ComputeMetrics(
include_metrics_per_category=self._include_metrics_per_category)
mask_metrics.update(mask_per_category_ap)
mask_metrics = {'DetectionMasks_'+ key: value
for key, value in mask_metrics.iteritems()}
return mask_metrics
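# Illustrative usage sketch (values below are made up); masks must be uint8
# arrays in {0, 1} of shape [num_boxes, image_height, image_width]:
#
#   evaluator = CocoMaskEvaluator([{'id': 1, 'name': 'cat'}])
#   masks = np.ones([1, 20, 20], dtype=np.uint8)
#   evaluator.add_single_ground_truth_image_info(
#       'image1',
#       {standard_fields.InputDataFields.groundtruth_boxes:
#            np.array([[0., 0., 20., 20.]], dtype=np.float32),
#        standard_fields.InputDataFields.groundtruth_classes: np.array([1]),
#        standard_fields.InputDataFields.groundtruth_instance_masks: masks})
#   evaluator.add_single_detected_image_info(
#       'image1',
#       {standard_fields.DetectionResultFields.detection_scores:
#            np.array([.9], dtype=np.float32),
#        standard_fields.DetectionResultFields.detection_classes:
#            np.array([1]),
#        standard_fields.DetectionResultFields.detection_masks: masks})
#   metrics = evaluator.evaluate()  # e.g. 'DetectionMasks_Precision/mAP'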
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.metrics.coco_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from object_detection.core import standard_fields
from object_detection.metrics import coco_evaluation
class CocoDetectionEvaluationTest(tf.test.TestCase):
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
"""Tests that mAP is calculated correctly on GT and Detections."""
category_list = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image2',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image2',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image3',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image3',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
def testRejectionOnDuplicateGroundtruth(self):
"""Tests that groundtruth cannot be added more than once for an image."""
categories = [{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'},
{'id': 3, 'name': 'elephant'}]
# Add groundtruth
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
coco_evaluator.add_single_ground_truth_image_info(image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1
})
groundtruth_lists_len = len(coco_evaluator._groundtruth_list)
# Add groundtruth with the same image id.
coco_evaluator.add_single_ground_truth_image_info(image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1
})
self.assertEqual(groundtruth_lists_len,
len(coco_evaluator._groundtruth_list))
def testRejectionOnDuplicateDetections(self):
"""Tests that detections cannot be added more than once for an image."""
categories = [{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'},
{'id': 3, 'name': 'elephant'}]
# Add groundtruth
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[99., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
detections_lists_len = len(coco_evaluator._detection_boxes_list)
coco_evaluator.add_single_detected_image_info(
image_id='image1', # Note that this image id was previously added.
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
self.assertEqual(detections_lists_len,
len(coco_evaluator._detection_boxes_list))
def testExceptionRaisedWithMissingGroundtruth(self):
"""Tests that exception is raised for detection with missing groundtruth."""
categories = [{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'},
{'id': 3, 'name': 'elephant'}]
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
with self.assertRaises(ValueError):
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
class CocoEvaluationPyFuncTest(tf.test.TestCase):
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
category_list = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(
image_id, groundtruth_boxes,
groundtruth_classes,
detection_boxes,
detection_scores,
detection_classes)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(update_op,
feed_dict={
image_id: 'image1',
groundtruth_boxes: np.array([[100., 100., 200., 200.]]),
groundtruth_classes: np.array([1]),
detection_boxes: np.array([[100., 100., 200., 200.]]),
detection_scores: np.array([.8]),
detection_classes: np.array([1])
})
sess.run(update_op,
feed_dict={
image_id: 'image2',
groundtruth_boxes: np.array([[50., 50., 100., 100.]]),
groundtruth_classes: np.array([3]),
detection_boxes: np.array([[50., 50., 100., 100.]]),
detection_scores: np.array([.7]),
detection_classes: np.array([3])
})
sess.run(update_op,
feed_dict={
image_id: 'image3',
groundtruth_boxes: np.array([[25., 25., 50., 50.]]),
groundtruth_classes: np.array([2]),
detection_boxes: np.array([[25., 25., 50., 50.]]),
detection_scores: np.array([.9]),
detection_classes: np.array([2])
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.iteritems():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
-1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
-1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
class CocoMaskEvaluationTest(tf.test.TestCase):
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
category_list = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
coco_evaluator = coco_evaluation.CocoMaskEvaluator(category_list)
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1]),
standard_fields.InputDataFields.groundtruth_instance_masks:
np.pad(np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_masks:
np.pad(np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image2',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1]),
standard_fields.InputDataFields.groundtruth_instance_masks:
np.pad(np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_detected_image_info(
image_id='image2',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_masks:
np.pad(np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image3',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1]),
standard_fields.InputDataFields.groundtruth_instance_masks:
np.pad(np.ones([1, 25, 25], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_detected_image_info(
image_id='image3',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_masks:
np.pad(np.ones([1, 25, 25], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0)
coco_evaluator.clear()
self.assertFalse(coco_evaluator._image_id_to_mask_shape_map)
self.assertFalse(coco_evaluator._image_ids_with_detections)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_masks_list)
if __name__ == '__main__':
tf.test.main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for third party pycocotools to be used within object_detection.
Note that nothing in this file is TensorFlow related, so these wrappers
cannot, for example, be called directly as a slim metric.
TODO: wrap as a slim metric in metrics.py
Usage example: given a set of images with ids in the list image_ids
and corresponding lists of numpy arrays encoding groundtruth (boxes and classes)
and detections (boxes, scores and classes), where elements of each list
correspond to detections/annotations of a single image,
then evaluation (in multi-class mode) can be invoked as follows:
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
image_ids, groundtruth_boxes_list, groundtruth_classes_list,
max_num_classes, output_path=None)
detections_list = coco_tools.ExportDetectionsToCOCO(
image_ids, detection_boxes_list, detection_scores_list,
detection_classes_list, output_path=None)
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
from collections import OrderedDict
import copy
import time
import numpy as np
from pycocotools import coco
from pycocotools import cocoeval
from pycocotools import mask
import tensorflow as tf
from object_detection.utils import json_utils
class COCOWrapper(coco.COCO):
"""Wrapper for the pycocotools COCO class."""
def __init__(self, dataset, detection_type='bbox'):
"""COCOWrapper constructor.
See http://mscoco.org/dataset/#format for a description of the format.
By default, the coco.COCO class constructor reads from a JSON file.
This function duplicates the same behavior but loads from a dictionary,
allowing us to perform evaluation without writing to external storage.
Args:
dataset: a dictionary holding bounding box annotations in the COCO format.
detection_type: type of detections being wrapped. Can be one of ['bbox',
'segmentation']
Raises:
ValueError: if detection_type is unsupported.
"""
supported_detection_types = ['bbox', 'segmentation']
if detection_type not in supported_detection_types:
raise ValueError('Unsupported detection type: {}. '
'Supported values are: {}'.format(
detection_type, supported_detection_types))
self._detection_type = detection_type
coco.COCO.__init__(self)
self.dataset = dataset
self.createIndex()
def LoadAnnotations(self, annotations):
"""Load annotations dictionary into COCO datastructure.
See http://mscoco.org/dataset/#format for a description of the annotations
format. As above, this function replicates the default behavior of the API
but does not require writing to external storage.
Args:
annotations: python list holding object detection results where each
detection is encoded as a dict with required keys ['image_id',
'category_id', 'score'] and one of ['bbox', 'segmentation'] based on
`detection_type`.
Returns:
a coco.COCO datastructure holding object detection annotations results
Raises:
ValueError: if annotations is not a list
ValueError: if annotations do not correspond to the images contained
in self.
"""
results = coco.COCO()
results.dataset['images'] = [img for img in self.dataset['images']]
tf.logging.info('Loading and preparing annotation results...')
tic = time.time()
if not isinstance(annotations, list):
raise ValueError('annotations is not a list of objects')
annotation_img_ids = [ann['image_id'] for ann in annotations]
if (set(annotation_img_ids) != (set(annotation_img_ids)
& set(self.getImgIds()))):
raise ValueError('Results do not correspond to current coco set')
results.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
if self._detection_type == 'bbox':
for idx, ann in enumerate(annotations):
bb = ann['bbox']
ann['area'] = bb[2] * bb[3]
ann['id'] = idx + 1
ann['iscrowd'] = 0
elif self._detection_type == 'segmentation':
for idx, ann in enumerate(annotations):
ann['area'] = mask.area(ann['segmentation'])
ann['bbox'] = mask.toBbox(ann['segmentation'])
ann['id'] = idx + 1
ann['iscrowd'] = 0
tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic))
results.dataset['annotations'] = annotations
results.createIndex()
return results
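# Example of the expected `annotations` input for detection_type='bbox'
# (illustrative values): each entry carries 'image_id', 'category_id',
# 'score' and a 'bbox' in [xmin, ymin, width, height]:
#
#   detections_list = [{'image_id': 'image1', 'category_id': 1,
#                       'score': 0.9, 'bbox': [10., 10., 40., 40.]}]
#   detections = coco_wrapped_groundtruth.LoadAnnotations(detections_list)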
class COCOEvalWrapper(cocoeval.COCOeval):
"""Wrapper for the pycocotools COCOeval class.
To evaluate, create two objects (groundtruth_dict and detections_list)
using the conventions listed at http://mscoco.org/dataset/#format.
Then call evaluation as follows:
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
def __init__(self, groundtruth=None, detections=None, agnostic_mode=False,
iou_type='bbox'):
"""COCOEvalWrapper constructor.
Note that for the area-based metrics to be meaningful, detection and
groundtruth boxes must be in image coordinates measured in pixels.
Args:
groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding
groundtruth annotations
detections: a coco.COCO (or coco_tools.COCOWrapper) object holding
detections
agnostic_mode: boolean (default: False). If True, evaluation ignores
class labels, treating all detections as proposals.
iou_type: IOU type to use for evaluation. Supports `bbox` or `segm`.
"""
cocoeval.COCOeval.__init__(self, groundtruth, detections,
iouType=iou_type)
if agnostic_mode:
self.params.useCats = 0
def GetCategory(self, category_id):
"""Fetches dictionary holding category information given category id.
Args:
category_id: integer id
Returns:
dictionary holding 'id', 'name'.
"""
return self.cocoGt.cats[category_id]
def GetAgnosticMode(self):
"""Returns true if COCO Eval is configured to evaluate in agnostic mode."""
return self.params.useCats == 0
def GetCategoryIdList(self):
"""Returns list of valid category ids."""
return self.params.catIds
def ComputeMetrics(self,
include_metrics_per_category=False,
all_metrics_per_category=False):
"""Computes detection metrics.
Args:
include_metrics_per_category: If True, will include metrics per category.
all_metrics_per_category: If true, include all the summary metrics for
each category in per_category_ap. Be careful with setting it to true if
you have more than a handful of categories, because it will pollute
your mldash.
Returns:
1. summary_metrics: a dictionary holding:
'Precision/mAP': mean average precision over classes averaged over IOU
thresholds ranging from .5 to .95 with .05 increments
'Precision/mAP@.50IOU': mean average precision at 50% IOU
'Precision/mAP@.75IOU': mean average precision at 75% IOU
'Precision/mAP (small)': mean average precision for small objects
(area < 32^2 pixels)
'Precision/mAP (medium)': mean average precision for medium sized
objects (32^2 pixels < area < 96^2 pixels)
'Precision/mAP (large)': mean average precision for large objects
(96^2 pixels < area < 10000^2 pixels)
'Recall/AR@1': average recall with 1 detection
'Recall/AR@10': average recall with 10 detections
'Recall/AR@100': average recall with 100 detections
'Recall/AR@100 (small)': average recall for small objects with 100
detections
'Recall/AR@100 (medium)': average recall for medium objects with 100
detections
'Recall/AR@100 (large)': average recall for large objects with 100
detections
2. per_category_ap: a dictionary holding category specific results with
keys of the form: 'Precision mAP ByCategory/category'
(without the supercategory part if no supercategories exist).
For backward compatibility 'PerformanceByCategory' is included in the
output regardless of all_metrics_per_category.
If evaluating class-agnostic mode, per_category_ap is an empty
dictionary.
Raises:
ValueError: If category_stats does not exist.
"""
self.evaluate()
self.accumulate()
self.summarize()
summary_metrics = OrderedDict([
('Precision/mAP', self.stats[0]),
('Precision/mAP@.50IOU', self.stats[1]),
('Precision/mAP@.75IOU', self.stats[2]),
('Precision/mAP (small)', self.stats[3]),
('Precision/mAP (medium)', self.stats[4]),
('Precision/mAP (large)', self.stats[5]),
('Recall/AR@1', self.stats[6]),
('Recall/AR@10', self.stats[7]),
('Recall/AR@100', self.stats[8]),
('Recall/AR@100 (small)', self.stats[9]),
('Recall/AR@100 (medium)', self.stats[10]),
('Recall/AR@100 (large)', self.stats[11])
])
if not include_metrics_per_category:
return summary_metrics, {}
if not hasattr(self, 'category_stats'):
raise ValueError('Category stats do not exist')
per_category_ap = OrderedDict([])
if self.GetAgnosticMode():
return summary_metrics, per_category_ap
for category_index, category_id in enumerate(self.GetCategoryIdList()):
category = self.GetCategory(category_id)['name']
# Kept for backward compatibility
per_category_ap['PerformanceByCategory/mAP/{}'.format(
category)] = self.category_stats[0][category_index]
if all_metrics_per_category:
per_category_ap['Precision mAP ByCategory/{}'.format(
category)] = self.category_stats[0][category_index]
per_category_ap['Precision mAP@.50IOU ByCategory/{}'.format(
category)] = self.category_stats[1][category_index]
per_category_ap['Precision mAP@.75IOU ByCategory/{}'.format(
category)] = self.category_stats[2][category_index]
per_category_ap['Precision mAP (small) ByCategory/{}'.format(
category)] = self.category_stats[3][category_index]
per_category_ap['Precision mAP (medium) ByCategory/{}'.format(
category)] = self.category_stats[4][category_index]
per_category_ap['Precision mAP (large) ByCategory/{}'.format(
category)] = self.category_stats[5][category_index]
per_category_ap['Recall AR@1 ByCategory/{}'.format(
category)] = self.category_stats[6][category_index]
per_category_ap['Recall AR@10 ByCategory/{}'.format(
category)] = self.category_stats[7][category_index]
per_category_ap['Recall AR@100 ByCategory/{}'.format(
category)] = self.category_stats[8][category_index]
per_category_ap['Recall AR@100 (small) ByCategory/{}'.format(
category)] = self.category_stats[9][category_index]
per_category_ap['Recall AR@100 (medium) ByCategory/{}'.format(
category)] = self.category_stats[10][category_index]
per_category_ap['Recall AR@100 (large) ByCategory/{}'.format(
category)] = self.category_stats[11][category_index]
return summary_metrics, per_category_ap
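# Illustrative call (assuming `evaluator` was built as in the class
# docstring above):
#
#   summary_metrics, per_category_ap = evaluator.ComputeMetrics(
#       include_metrics_per_category=True)
#   # per_category_ap['PerformanceByCategory/mAP/cat'] -> AP for class 'cat'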
def _ConvertBoxToCOCOFormat(box):
"""Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.
This is a utility function for converting from our internal
[ymin, xmin, ymax, xmax] convention to the convention used by the COCO API
i.e., [xmin, ymin, width, height].
Args:
box: a [ymin, xmin, ymax, xmax] numpy array
Returns:
a list of floats representing [xmin, ymin, width, height]
"""
return [float(box[1]), float(box[0]), float(box[3] - box[1]),
float(box[2] - box[0])]
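# Worked example (illustrative): a box 60 pixels wide and 40 pixels tall
# with its top-left corner at (x=20, y=10):
#
#   _ConvertBoxToCOCOFormat(np.array([10., 20., 50., 80.]))
#   # returns [20.0, 10.0, 60.0, 40.0], i.e. [xmin, ymin, width, height]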
def _RleCompress(masks):
"""Compresses mask using Run-length encoding provided by pycocotools.
Args:
masks: uint8 numpy array of shape [mask_height, mask_width] with values in
{0, 1}.
Returns:
A pycocotools Run-length encoding of the mask.
"""
return mask.encode(np.asfortranarray(masks))
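# For example (illustrative):
#
#   rle = _RleCompress(np.ones([4, 4], dtype=np.uint8))
#   # rle is a dict holding 'size' == [4, 4] and a compressed 'counts' field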
def ExportSingleImageGroundtruthToCoco(image_id,
next_annotation_id,
category_id_set,
groundtruth_boxes,
groundtruth_classes,
groundtruth_masks=None):
"""Export groundtruth of a single image to COCO format.
This function converts groundtruth detection annotations represented as numpy
arrays to dictionaries that can be ingested by the COCO evaluation API. Note
that the image_ids provided here must match the ones given to
ExportSingleImageDetectionBoxesToCoco. We assume that boxes and classes are in
correspondence - that is: groundtruth_boxes[i, :] and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box and "iscrowd" fields are always set to 0.
TODO: pass in "iscrowd" array for evaluating on COCO dataset.
Args:
image_id: a unique image identifier either of type integer or string.
next_annotation_id: integer specifying the first id to use for the
groundtruth annotations. All annotations are assigned a continuous integer
id starting from this value.
category_id_set: A set of valid class ids. Groundtruth with classes not in
category_id_set are dropped.
groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
groundtruth_masks: optional uint8 numpy array of shape [num_gt_boxes,
image_height, image_width] containing groundtruth masks for the boxes.
Returns:
a list of groundtruth annotations for a single image in the COCO format.
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
if len(groundtruth_classes.shape) != 1:
raise ValueError('groundtruth_classes is '
'expected to be of rank 1.')
if len(groundtruth_boxes.shape) != 2:
raise ValueError('groundtruth_boxes is expected to be of '
'rank 2.')
if groundtruth_boxes.shape[1] != 4:
raise ValueError('groundtruth_boxes should have '
'shape[1] == 4.')
num_boxes = groundtruth_classes.shape[0]
if num_boxes != groundtruth_boxes.shape[0]:
raise ValueError('Corresponding entries in groundtruth_classes, '
'and groundtruth_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. Image ID: %s' % (
groundtruth_classes.shape[0],
groundtruth_boxes.shape[0], image_id))
groundtruth_list = []
for i in range(num_boxes):
if groundtruth_classes[i] in category_id_set:
export_dict = {
'id': next_annotation_id + i,
'image_id': image_id,
'category_id': int(groundtruth_classes[i]),
'bbox': list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),
'area': float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) *
(groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1])),
'iscrowd': 0
}
if groundtruth_masks is not None:
export_dict['segmentation'] = _RleCompress(groundtruth_masks[i])
groundtruth_list.append(export_dict)
return groundtruth_list
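# Worked example (illustrative values): one class-1 groundtruth box covering
# a 50x50 region:
#
#   ExportSingleImageGroundtruthToCoco(
#       image_id='image1', next_annotation_id=1, category_id_set=set([1]),
#       groundtruth_boxes=np.array([[0., 0., 50., 50.]], dtype=np.float32),
#       groundtruth_classes=np.array([1]))
#   # returns [{'id': 1, 'image_id': 'image1', 'category_id': 1,
#   #           'bbox': [0.0, 0.0, 50.0, 50.0], 'area': 2500.0, 'iscrowd': 0}]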
def ExportGroundtruthToCOCO(image_ids,
groundtruth_boxes,
groundtruth_classes,
categories,
output_path=None):
"""Export groundtruth detection annotations in numpy arrays to COCO API.
This function converts a set of groundtruth detection annotations represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are three lists: image ids for each groundtruth image,
groundtruth boxes for each image and groundtruth classes respectively.
Note that the image_ids provided here must match the ones given to the
ExportDetectionsToCOCO function in order for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: image_id[i], groundtruth_boxes[i, :] and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box and "iscrowd" fields are always set to 0.
TODO: pass in "iscrowd" array for evaluating on COCO dataset.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4]
(note that num_gt_boxes can be different for each entry in the list)
groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes]
(note that num_gt_boxes can be different for each entry in the list)
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
output_path: (optional) path for exporting result to JSON
Returns:
dictionary that can be read by COCO API
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
category_id_set = set([cat['id'] for cat in categories])
groundtruth_export_list = []
image_export_list = []
if not len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes):
raise ValueError('Input lists must have the same length')
# For reasons internal to the COCO API, it is important that annotation ids
# are not equal to zero; we thus start counting from 1.
annotation_id = 1
for image_id, boxes, classes in zip(image_ids, groundtruth_boxes,
groundtruth_classes):
image_export_list.append({'id': image_id})
groundtruth_export_list.extend(ExportSingleImageGroundtruthToCoco(
image_id,
annotation_id,
category_id_set,
boxes,
classes))
num_boxes = classes.shape[0]
annotation_id += num_boxes
groundtruth_dict = {
'annotations': groundtruth_export_list,
'images': image_export_list,
'categories': categories
}
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2)
return groundtruth_dict
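# Illustrative sketch (all values made up): exporting groundtruth for two
# images in one call.
#
#   groundtruth_dict = ExportGroundtruthToCOCO(
#       image_ids=['image1', 'image2'],
#       groundtruth_boxes=[np.array([[0., 0., 10., 10.]], dtype=np.float32),
#                          np.array([[5., 5., 20., 20.]], dtype=np.float32)],
#       groundtruth_classes=[np.array([1]), np.array([1])],
#       categories=[{'id': 1, 'name': 'cat'}])
#   # The result holds 'annotations', 'images' and 'categories' keys and can
#   # be passed directly to COCOWrapper.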
def ExportSingleImageDetectionBoxesToCoco(image_id,
category_id_set,
detection_boxes,
detection_scores,
detection_classes):
"""Export detections of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
  that can be ingested by the COCO evaluation API. Note that the image_ids
  provided here must match the ones given to
  ExportSingleImageGroundtruthToCoco in order for evaluation to work properly.
  We assume that boxes, scores and classes are in correspondence --- that is:
  boxes[i, :], scores[i] and classes[i] are associated with the same detection.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_boxes: float numpy array of shape [num_detections, 4] containing
detection boxes.
detection_scores: float numpy array of shape [num_detections] containing
      scores for the detection boxes.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection boxes.
Returns:
a list of detection annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_boxes, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'expected to be of rank 1.')
if len(detection_boxes.shape) != 2:
raise ValueError('All entries in detection_boxes expected to be of '
'rank 2.')
if detection_boxes.shape[1] != 4:
raise ValueError('All entries in detection_boxes should have '
'shape[1] == 4.')
num_boxes = detection_classes.shape[0]
if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. '
'Scores shape: %d' % (
detection_classes.shape[0], detection_boxes.shape[0],
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
detections_list.append({
'image_id': image_id,
'category_id': int(detection_classes[i]),
'bbox': list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])),
'score': float(detection_scores[i])
})
return detections_list
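# Illustrative usage sketch with hypothetical placeholder values: boxes follow
# the [ymin, xmin, ymax, xmax] convention used elsewhere in this codebase and
# are converted to COCO [x, y, width, height] format internally.
def _example_single_image_detection_boxes_export():
  boxes = np.array([[0., 0., 1., 1.],
                    [0., 0., .5, .5]], dtype=np.float32)
  scores = np.array([.9, .4], dtype=np.float32)
  classes = np.array([1, 2], dtype=np.int32)
  return ExportSingleImageDetectionBoxesToCoco(
      image_id='demo_image',
      category_id_set=set([1, 2]),
      detection_boxes=boxes,
      detection_scores=scores,
      detection_classes=classes)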
def ExportSingleImageDetectionMasksToCoco(image_id,
category_id_set,
detection_masks,
detection_scores,
detection_classes):
"""Export detection masks of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. We assume that
detection_masks, detection_scores, and detection_classes are in correspondence
- that is: detection_masks[i, :], detection_classes[i] and detection_scores[i]
are associated with the same annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_masks: uint8 numpy array of shape [num_detections, image_height,
image_width] containing detection_masks.
detection_scores: float numpy array of shape [num_detections] containing
scores for detection masks.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection masks.
Returns:
a list of detection mask annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_masks, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'expected to be of rank 1.')
num_boxes = detection_classes.shape[0]
if not num_boxes == len(detection_masks) == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_masks should have '
                     'compatible lengths and shapes. '
'Classes length: %d. Masks length: %d. '
'Scores length: %d' % (
detection_classes.shape[0], len(detection_masks),
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
detections_list.append({
'image_id': image_id,
'category_id': int(detection_classes[i]),
'segmentation': _RleCompress(detection_masks[i]),
'score': float(detection_scores[i])
})
return detections_list
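# Illustrative usage sketch with hypothetical placeholder values: masks are
# full-image binary uint8 arrays, one per detection, which _RleCompress
# encodes as COCO run-length segmentations.
def _example_single_image_detection_masks_export():
  masks = np.zeros((2, 4, 4), dtype=np.uint8)
  masks[0, 1:3, 1:3] = 1  # first detection: a 2x2 patch
  masks[1, 0, :] = 1      # second detection: the top row
  scores = np.array([.8, .6], dtype=np.float32)
  classes = np.array([1, 1], dtype=np.int32)
  return ExportSingleImageDetectionMasksToCoco(
      image_id='demo_image',
      category_id_set=set([1]),
      detection_masks=masks,
      detection_scores=scores,
      detection_classes=classes)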
def ExportDetectionsToCOCO(image_ids,
detection_boxes,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export detection annotations in numpy arrays to COCO API.
This function converts a set of predicted detections represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of boxes, scores and
classes, respectively, corresponding to each image for which detections
have been produced. Note that the image_ids provided here must
match the ones given to the ExportGroundtruthToCOCO function in order
for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: detection_boxes[i, :], detection_scores[i] and
detection_classes[i] are associated with the same detection.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
detection_boxes: list of numpy arrays with shape [num_detection_boxes, 4]
detection_scores: list of numpy arrays (float) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'bbox', 'score'].
Raises:
ValueError: if (1) detection_boxes and detection_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers.
"""
category_id_set = set([cat['id'] for cat in categories])
detections_export_list = []
if not (len(image_ids) == len(detection_boxes) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
for image_id, boxes, scores, classes in zip(image_ids, detection_boxes,
detection_scores,
detection_classes):
detections_export_list.extend(ExportSingleImageDetectionBoxesToCoco(
image_id,
category_id_set,
boxes,
scores,
classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2)
return detections_export_list
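# Illustrative usage sketch with hypothetical placeholder values, paired with
# the _example_export_groundtruth_to_coco sketch above: the same image ids
# must be reused so detections and groundtruth line up at evaluation time.
def _example_export_detections_to_coco():
  image_ids = ['img_0', 'img_1']
  detection_boxes = [np.array([[12., 11., 48., 52.]], dtype=np.float32),
                     np.array([[22., 18., 58., 44.]], dtype=np.float32)]
  detection_scores = [np.array([.9], dtype=np.float32),
                      np.array([.75], dtype=np.float32)]
  detection_classes = [np.array([1], dtype=np.int32),
                       np.array([2], dtype=np.int32)]
  categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
  return ExportDetectionsToCOCO(
      image_ids, detection_boxes, detection_scores, detection_classes,
      categories)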
def ExportSegmentsToCOCO(image_ids,
detection_masks,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export segmentation masks in numpy arrays to COCO API.
This function converts a set of predicted instance masks represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of segments, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
  Note that this function is recommended for small datasets. For large
  datasets, it should be used together with a merge function (e.g. in
  MapReduce); otherwise memory consumption can be large.
We assume that for each image, masks, scores and classes are in
correspondence --- that is: detection_masks[i, :, :, :], detection_scores[i]
and detection_classes[i] are associated with the same detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_masks: list of numpy arrays with shape [num_detection, h, w, 1]
and type uint8. The height and width should match the shape of
corresponding image.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'segmentation', 'score'].
Raises:
ValueError: if detection_masks and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_masks) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
segment_export_list = []
for image_id, masks, scores, classes in zip(image_ids, detection_masks,
detection_scores,
detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'expected to be of rank 1.')
if len(masks.shape) != 4:
raise ValueError('All entries in masks expected to be of '
'rank 4. Given {}'.format(masks.shape))
num_boxes = classes.shape[0]
if not num_boxes == masks.shape[0] == scores.shape[0]:
      raise ValueError('Corresponding entries in detection_classes, '
                       'detection_scores and detection_masks should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
segment_export_list.extend(ExportSingleImageDetectionMasksToCoco(
image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2)
return segment_export_list
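# Illustrative usage sketch with hypothetical placeholder values: each entry
# of detection_masks carries a trailing channel dimension
# [num_detection, h, w, 1] that this wrapper squeezes before delegating to
# ExportSingleImageDetectionMasksToCoco.
def _example_export_segments_to_coco():
  masks = np.zeros((1, 4, 4, 1), dtype=np.uint8)
  masks[0, 1:3, 1:3, 0] = 1  # one detection covering a 2x2 patch
  return ExportSegmentsToCOCO(
      image_ids=['img_0'],
      detection_masks=[masks],
      detection_scores=[np.array([.8], dtype=np.float32)],
      detection_classes=[np.array([1], dtype=np.int32)],
      categories=[{'id': 1, 'name': 'cat'}])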
def ExportKeypointsToCOCO(image_ids,
detection_keypoints,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Exports keypoints in numpy arrays to COCO API.
This function converts a set of predicted keypoints represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of keypoints, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
We assume that for each image, keypoints, scores and classes are in
  correspondence --- that is: detection_keypoints[i, :, :],
detection_scores[i] and detection_classes[i] are associated with the same
detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_keypoints: list of numpy arrays with shape
[num_detection, num_keypoints, 2] and type float32 in absolute
x-y coordinates.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category and an integer 'num_keypoints' key specifying the number of
keypoints the category has.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'keypoints', 'score'].
Raises:
ValueError: if detection_keypoints and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_keypoints) ==
len(detection_scores) == len(detection_classes)):
raise ValueError('Input lists must have the same length')
keypoints_export_list = []
for image_id, keypoints, scores, classes in zip(
image_ids, detection_keypoints, detection_scores, detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'expected to be of rank 1.')
if len(keypoints.shape) != 3:
raise ValueError('All entries in keypoints expected to be of '
'rank 3. Given {}'.format(keypoints.shape))
num_boxes = classes.shape[0]
if not num_boxes == keypoints.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_keypoints, and detection_scores should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
category_id_to_num_keypoints_map = {
cat['id']: cat['num_keypoints'] for cat in categories
if 'num_keypoints' in cat}
for i in range(num_boxes):
if classes[i] not in category_id_set:
raise ValueError('class id should be in category_id_set\n')
if classes[i] in category_id_to_num_keypoints_map:
num_keypoints = category_id_to_num_keypoints_map[classes[i]]
# Adds extra ones to indicate the visibility for each keypoint as is
# recommended by MSCOCO.
instance_keypoints = np.concatenate(
[keypoints[i, 0:num_keypoints, :],
np.expand_dims(np.ones(num_keypoints), axis=1)],
axis=1).astype(int)
instance_keypoints = instance_keypoints.flatten().tolist()
keypoints_export_list.append({
'image_id': image_id,
'category_id': int(classes[i]),
'keypoints': instance_keypoints,
'score': float(scores[i])
})
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2)
return keypoints_export_list
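# Illustrative usage sketch with hypothetical placeholder values: one image
# with a single 3-keypoint detection in absolute x-y coordinates; only
# categories carrying a 'num_keypoints' key have their keypoints exported.
def _example_export_keypoints_to_coco():
  keypoints = np.array([[[10., 20.], [30., 40.], [50., 60.]]],
                       dtype=np.float32)
  return ExportKeypointsToCOCO(
      image_ids=['img_0'],
      detection_keypoints=[keypoints],
      detection_scores=[np.array([.85], dtype=np.float32)],
      detection_classes=[np.array([1], dtype=np.int32)],
      categories=[{'id': 1, 'name': 'person', 'num_keypoints': 3}])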
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_model.object_detection.metrics.coco_tools."""
import json
import os
import re
import numpy as np
from pycocotools import mask
import tensorflow as tf
from object_detection.metrics import coco_tools
class CocoToolsTest(tf.test.TestCase):
def setUp(self):
groundtruth_annotations_list = [
{
'id': 1,
'image_id': 'first',
'category_id': 1,
'bbox': [100., 100., 100., 100.],
'area': 100.**2,
'iscrowd': 0
},
{
'id': 2,
'image_id': 'second',
'category_id': 1,
'bbox': [50., 50., 50., 50.],
'area': 50.**2,
'iscrowd': 0
},
]
image_list = [{'id': 'first'}, {'id': 'second'}]
category_list = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
self._groundtruth_dict = {
'annotations': groundtruth_annotations_list,
'images': image_list,
'categories': category_list
}
self._detections_list = [
{
'image_id': 'first',
'category_id': 1,
'bbox': [100., 100., 100., 100.],
'score': .8
},
{
'image_id': 'second',
'category_id': 1,
'bbox': [50., 50., 50., 50.],
'score': .7
},
]
def testCocoWrappers(self):
groundtruth = coco_tools.COCOWrapper(self._groundtruth_dict)
detections = groundtruth.LoadAnnotations(self._detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections)
summary_metrics, _ = evaluator.ComputeMetrics()
self.assertAlmostEqual(1.0, summary_metrics['Precision/mAP'])
def testExportGroundtruthToCOCO(self):
image_ids = ['first', 'second']
groundtruth_boxes = [np.array([[100, 100, 200, 200]], np.float),
np.array([[50, 50, 100, 100]], np.float)]
groundtruth_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'groundtruth.json')
result = coco_tools.ExportGroundtruthToCOCO(
image_ids,
groundtruth_boxes,
groundtruth_classes,
categories,
output_path=output_path)
self.assertDictEqual(result, self._groundtruth_dict)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
# The json output should have floats written to 4 digits of precision.
    matcher = re.compile(r'"bbox":\s+\[\n\s+\d+\.\d\d\d\d,', re.MULTILINE)
self.assertTrue(matcher.findall(written_result))
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testExportDetectionsToCOCO(self):
image_ids = ['first', 'second']
detections_boxes = [np.array([[100, 100, 200, 200]], np.float),
np.array([[50, 50, 100, 100]], np.float)]
detections_scores = [np.array([.8], np.float), np.array([.7], np.float)]
detections_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'detections.json')
result = coco_tools.ExportDetectionsToCOCO(
image_ids,
detections_boxes,
detections_scores,
detections_classes,
categories,
output_path=output_path)
self.assertListEqual(result, self._detections_list)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
# The json output should have floats written to 4 digits of precision.
    matcher = re.compile(r'"bbox":\s+\[\n\s+\d+\.\d\d\d\d,', re.MULTILINE)
self.assertTrue(matcher.findall(written_result))
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testExportSegmentsToCOCO(self):
image_ids = ['first', 'second']
detection_masks = [np.array(
[[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]],
dtype=np.uint8), np.array(
[[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]],
dtype=np.uint8)]
for i, detection_mask in enumerate(detection_masks):
detection_masks[i] = detection_mask[:, :, :, None]
detection_scores = [np.array([.8], np.float), np.array([.7], np.float)]
detection_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'segments.json')
result = coco_tools.ExportSegmentsToCOCO(
image_ids,
detection_masks,
detection_scores,
detection_classes,
categories,
output_path=output_path)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
written_result = json.loads(written_result)
mask_load = mask.decode([written_result[0]['segmentation']])
self.assertTrue(np.allclose(mask_load, detection_masks[0]))
self.assertAlmostEqual(result, written_result)
def testExportKeypointsToCOCO(self):
image_ids = ['first', 'second']
detection_keypoints = [
np.array(
[[[100, 200], [300, 400], [500, 600]],
[[50, 150], [250, 350], [450, 550]]], dtype=np.int32),
np.array(
[[[110, 210], [310, 410], [510, 610]],
[[60, 160], [260, 360], [460, 560]]], dtype=np.int32)]
detection_scores = [np.array([.8, 0.2], np.float),
np.array([.7, 0.3], np.float)]
detection_classes = [np.array([1, 1], np.int32), np.array([1, 1], np.int32)]
categories = [{'id': 1, 'name': 'person', 'num_keypoints': 3},
{'id': 2, 'name': 'cat'},
{'id': 3, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'keypoints.json')
result = coco_tools.ExportKeypointsToCOCO(
image_ids,
detection_keypoints,
detection_scores,
detection_classes,
categories,
output_path=output_path)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testSingleImageDetectionBoxesExport(self):
boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, 1, 1]], dtype=np.float32)
classes = np.array([1, 2, 3], dtype=np.int32)
scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, .5, .5]], dtype=np.float32)
coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
detection_boxes=boxes,
detection_classes=classes,
detection_scores=scores)
for i, annotation in enumerate(coco_annotations):
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertAlmostEqual(annotation['score'], scores[i])
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
def testSingleImageDetectionMaskExport(self):
masks = np.array(
[[[1, 1,], [1, 1]],
[[0, 0], [0, 1]],
[[0, 0], [0, 0]]], dtype=np.uint8)
classes = np.array([1, 2, 3], dtype=np.int32)
scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
coco_annotations = coco_tools.ExportSingleImageDetectionMasksToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
detection_classes=classes,
detection_scores=scores,
detection_masks=masks)
expected_counts = ['04', '31', '4']
for i, mask_annotation in enumerate(coco_annotations):
self.assertEqual(mask_annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
mask_annotation['segmentation']), masks[i])))
self.assertEqual(mask_annotation['image_id'], 'first_image')
self.assertEqual(mask_annotation['category_id'], classes[i])
self.assertAlmostEqual(mask_annotation['score'], scores[i])
def testSingleImageGroundtruthExport(self):
masks = np.array(
[[[1, 1,], [1, 1]],
[[0, 0], [0, 1]],
[[0, 0], [0, 0]]], dtype=np.uint8)
boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, 1, 1]], dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, .5, .5]], dtype=np.float32)
classes = np.array([1, 2, 3], dtype=np.int32)
next_annotation_id = 1
coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_masks=masks)
expected_counts = ['04', '31', '4']
for i, annotation in enumerate(coco_annotations):
self.assertEqual(annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
annotation['segmentation']), masks[i])))
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
if __name__ == '__main__':
tf.test.main()
@@ -22,7 +22,7 @@ The evaluation metrics set is supplied in object_detection.protos.EvalConfig
 in metrics_set field.
 Currently two set of metrics are supported:
 - pascal_voc_metrics: standard PASCAL VOC 2007 metric
-- open_images_metrics: Open Image V2 metric
+- open_images_detection_metrics: Open Image V2 metric
 All other field of object_detection.protos.EvalConfig are ignored.
 Example usage: