Commit c308c03c authored by Mehdi Sharifzadeh, committed by Taylor Robie

Mask R-CNN model added to models/research/mlperf_object_detection/Mask_RCNN (#4678)

* Create README.md

* readme changed

* readme changed

* ResNet backbone completed.

* FPN added

* Create README.md

* initial commit

* files removed

* initial commit

* protobuf file removed
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.mean_stddev_boxcoder."""
import tensorflow as tf
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_list
class MeanStddevBoxCoderTest(tf.test.TestCase):
def testGetCorrectRelativeCodesAfterEncoding(self):
box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]]
boxes = box_list.BoxList(tf.constant(box_corners))
expected_rel_codes = [[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]]
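# MeanStddevBoxCoder encodes (box_corners - prior_means) / stddev; with
# stddev=0.1 the second row is ([0., 0., .5, .5] - [.5, .5, 1., .8]) / 0.1
# = [-5., -5., -5., -3.].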
prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]])
priors = box_list.BoxList(prior_means)
coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
rel_codes = coder.encode(boxes, priors)
with self.test_session() as sess:
rel_codes_out = sess.run(rel_codes)
self.assertAllClose(rel_codes_out, expected_rel_codes)
def testGetCorrectBoxesAfterDecoding(self):
rel_codes = tf.constant([[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]])
expected_box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]]
prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]])
priors = box_list.BoxList(prior_means)
coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
decoded_boxes = coder.decode(rel_codes, priors)
decoded_box_corners = decoded_boxes.get()
with self.test_session() as sess:
decoded_out = sess.run(decoded_box_corners)
self.assertAllClose(decoded_out, expected_box_corners)
if __name__ == '__main__':
tf.test.main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Square box coder.
Square box coder follows the coding schema described below:
l = sqrt(h * w)
la = sqrt(ha * wa)
ty = (y - ya) / la
tx = (x - xa) / la
tl = log(l / la)
where x, y, w, h denote the box's center coordinates, width, and height,
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width, and height. tx, ty, and tl denote the anchor-encoded
center and length, respectively. Because the encoded box is a square, only
one length is encoded.
This has been shown to provide performance improvements over the Faster RCNN
box coder when the objects being detected tend to be square (e.g. faces) and
when the input images are not distorted via resizing.
"""
import tensorflow as tf
from object_detection.core import box_coder
from object_detection.core import box_list
EPSILON = 1e-8
class SquareBoxCoder(box_coder.BoxCoder):
"""Encodes a 3-scalar representation of a square box."""
def __init__(self, scale_factors=None):
"""Constructor for SquareBoxCoder.
Args:
scale_factors: List of 3 positive scalars to scale ty, tx, and tl.
If set to None, does not perform scaling. For faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0].
Raises:
ValueError: If scale_factors is not length 3 or contains values less than
or equal to 0.
"""
if scale_factors:
if len(scale_factors) != 3:
raise ValueError('The argument scale_factors must be a list of length '
'3.')
if any(scalar <= 0 for scalar in scale_factors):
raise ValueError('The values in scale_factors must all be greater '
'than 0.')
self._scale_factors = scale_factors
@property
def code_size(self):
return 3
def _encode(self, boxes, anchors):
"""Encodes a box collection with respect to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, tl].
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
la = tf.sqrt(ha * wa)
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
l = tf.sqrt(h * w)
# Avoid NaN in division and log below.
la += EPSILON
l += EPSILON
tx = (xcenter - xcenter_a) / la
ty = (ycenter - ycenter_a) / la
tl = tf.log(l / la)
# Scales location targets for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
tl *= self._scale_factors[2]
return tf.transpose(tf.stack([ty, tx, tl]))
def _decode(self, rel_codes, anchors):
"""Decodes relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
la = tf.sqrt(ha * wa)
ty, tx, tl = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
tl /= self._scale_factors[2]
l = tf.exp(tl) * la
ycenter = ty * la + ycenter_a
xcenter = tx * la + xcenter_a
ymin = ycenter - l / 2.
xmin = xcenter - l / 2.
ymax = ycenter + l / 2.
xmax = xcenter + l / 2.
return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
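# A minimal usage sketch (session-style, mirroring the tests that follow; the
# values correspond to the first test case below):
#   boxes = box_list.BoxList(tf.constant([[10.0, 10.0, 20.0, 15.0]]))
#   anchors = box_list.BoxList(tf.constant([[15.0, 12.0, 30.0, 18.0]]))
#   coder = SquareBoxCoder(scale_factors=None)
#   rel_codes = coder.encode(boxes, anchors)    # ~[[-0.7906, -0.2635, -0.2939]]
#   decoded = coder.decode(rel_codes, anchors)  # a square box of side sqrt(h*w)
#                                               # centered on the original box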
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.square_box_coder."""
import tensorflow as tf
from object_detection.box_coders import square_box_coder
from object_detection.core import box_list
class SquareBoxCoderTest(tf.test.TestCase):
def test_correct_relative_codes_with_default_scale(self):
boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
scale_factors = None
expected_rel_codes = [[-0.790569, -0.263523, -0.293893],
[-0.068041, -0.272166, -0.89588]]
boxes = box_list.BoxList(tf.constant(boxes))
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
(rel_codes_out,) = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
def test_correct_relative_codes_with_non_default_scale(self):
boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
scale_factors = [2, 3, 4]
expected_rel_codes = [[-1.581139, -0.790569, -1.175573],
[-0.136083, -0.816497, -3.583519]]
boxes = box_list.BoxList(tf.constant(boxes))
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
(rel_codes_out,) = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
def test_correct_relative_codes_with_small_width(self):
boxes = [[10.0, 10.0, 10.0000001, 20.0]]
anchors = [[15.0, 12.0, 30.0, 18.0]]
scale_factors = None
expected_rel_codes = [[-1.317616, 0., -20.670586]]
boxes = box_list.BoxList(tf.constant(boxes))
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
(rel_codes_out,) = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
def test_correct_boxes_with_default_scale(self):
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
rel_codes = [[-0.5, -0.416666, -0.405465],
[-0.083333, -0.222222, -0.693147]]
scale_factors = None
expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432],
[0.155051, 0.102989, 0.522474, 0.470412]]
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
boxes = coder.decode(rel_codes, anchors)
with self.test_session() as sess:
(boxes_out,) = sess.run([boxes.get()])
self.assertAllClose(boxes_out, expected_boxes)
def test_correct_boxes_with_non_default_scale(self):
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
rel_codes = [[-1., -1.25, -1.62186], [-0.166667, -0.666667, -2.772588]]
scale_factors = [2, 3, 4]
expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432],
[0.155051, 0.102989, 0.522474, 0.470412]]
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
boxes = coder.decode(rel_codes, anchors)
with self.test_session() as sess:
(boxes_out,) = sess.run([boxes.get()])
self.assertAllClose(boxes_out, expected_boxes)
if __name__ == '__main__':
tf.test.main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection anchor generator from config."""
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.anchor_generators import multiple_grid_anchor_generator
from object_detection.anchor_generators import multiscale_grid_anchor_generator
from object_detection.protos import anchor_generator_pb2
def build(anchor_generator_config):
"""Builds an anchor generator based on the config.
Args:
anchor_generator_config: An anchor_generator.proto object containing the
config for the desired anchor generator.
Returns:
Anchor generator based on the config.
Raises:
ValueError: On empty anchor generator proto.
"""
if not isinstance(anchor_generator_config,
anchor_generator_pb2.AnchorGenerator):
raise ValueError('anchor_generator_config not of type '
'anchor_generator_pb2.AnchorGenerator')
if anchor_generator_config.WhichOneof(
'anchor_generator_oneof') == 'grid_anchor_generator':
grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator
return grid_anchor_generator.GridAnchorGenerator(
scales=[float(scale) for scale in grid_anchor_generator_config.scales],
aspect_ratios=[float(aspect_ratio)
for aspect_ratio
in grid_anchor_generator_config.aspect_ratios],
base_anchor_size=[grid_anchor_generator_config.height,
grid_anchor_generator_config.width],
anchor_stride=[grid_anchor_generator_config.height_stride,
grid_anchor_generator_config.width_stride],
anchor_offset=[grid_anchor_generator_config.height_offset,
grid_anchor_generator_config.width_offset])
elif anchor_generator_config.WhichOneof(
'anchor_generator_oneof') == 'ssd_anchor_generator':
ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator
anchor_strides = None
if ssd_anchor_generator_config.height_stride:
anchor_strides = zip(ssd_anchor_generator_config.height_stride,
ssd_anchor_generator_config.width_stride)
anchor_offsets = None
if ssd_anchor_generator_config.height_offset:
anchor_offsets = zip(ssd_anchor_generator_config.height_offset,
ssd_anchor_generator_config.width_offset)
return multiple_grid_anchor_generator.create_ssd_anchors(
num_layers=ssd_anchor_generator_config.num_layers,
min_scale=ssd_anchor_generator_config.min_scale,
max_scale=ssd_anchor_generator_config.max_scale,
scales=[float(scale) for scale in ssd_anchor_generator_config.scales],
aspect_ratios=ssd_anchor_generator_config.aspect_ratios,
interpolated_scale_aspect_ratio=(
ssd_anchor_generator_config.interpolated_scale_aspect_ratio),
base_anchor_size=[
ssd_anchor_generator_config.base_anchor_height,
ssd_anchor_generator_config.base_anchor_width
],
anchor_strides=anchor_strides,
anchor_offsets=anchor_offsets,
reduce_boxes_in_lowest_layer=(
ssd_anchor_generator_config.reduce_boxes_in_lowest_layer))
elif anchor_generator_config.WhichOneof(
'anchor_generator_oneof') == 'multiscale_anchor_generator':
cfg = anchor_generator_config.multiscale_anchor_generator
return multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator(
cfg.min_level,
cfg.max_level,
cfg.anchor_scale,
[float(aspect_ratio) for aspect_ratio in cfg.aspect_ratios],
cfg.scales_per_octave,
cfg.normalize_coordinates
)
else:
raise ValueError('Empty anchor generator.')
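# A minimal usage sketch (mirroring the builder tests below): parse an
# AnchorGenerator text proto and hand it to build().
#   from google.protobuf import text_format
#   proto = anchor_generator_pb2.AnchorGenerator()
#   text_format.Merge('grid_anchor_generator { }', proto)
#   generator = build(proto)  # -> grid_anchor_generator.GridAnchorGenerator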
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generator_builder."""
import math
import tensorflow as tf
from google.protobuf import text_format
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.anchor_generators import multiple_grid_anchor_generator
from object_detection.anchor_generators import multiscale_grid_anchor_generator
from object_detection.builders import anchor_generator_builder
from object_detection.protos import anchor_generator_pb2
class AnchorGeneratorBuilderTest(tf.test.TestCase):
def assert_almost_list_equal(self, expected_list, actual_list, delta=None):
self.assertEqual(len(expected_list), len(actual_list))
for expected_item, actual_item in zip(expected_list, actual_list):
self.assertAlmostEqual(expected_item, actual_item, delta=delta)
def test_build_grid_anchor_generator_with_defaults(self):
anchor_generator_text_proto = """
grid_anchor_generator {
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
grid_anchor_generator.GridAnchorGenerator))
self.assertListEqual(anchor_generator_object._scales, [])
self.assertListEqual(anchor_generator_object._aspect_ratios, [])
with self.test_session() as sess:
base_anchor_size, anchor_offset, anchor_stride = sess.run(
[anchor_generator_object._base_anchor_size,
anchor_generator_object._anchor_offset,
anchor_generator_object._anchor_stride])
self.assertAllEqual(anchor_offset, [0, 0])
self.assertAllEqual(anchor_stride, [16, 16])
self.assertAllEqual(base_anchor_size, [256, 256])
def test_build_grid_anchor_generator_with_non_default_parameters(self):
anchor_generator_text_proto = """
grid_anchor_generator {
height: 128
width: 512
height_stride: 10
width_stride: 20
height_offset: 30
width_offset: 40
scales: [0.4, 2.2]
aspect_ratios: [0.3, 4.5]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
grid_anchor_generator.GridAnchorGenerator))
self.assert_almost_list_equal(anchor_generator_object._scales,
[0.4, 2.2])
self.assert_almost_list_equal(anchor_generator_object._aspect_ratios,
[0.3, 4.5])
with self.test_session() as sess:
base_anchor_size, anchor_offset, anchor_stride = sess.run(
[anchor_generator_object._base_anchor_size,
anchor_generator_object._anchor_offset,
anchor_generator_object._anchor_stride])
self.assertAllEqual(anchor_offset, [30, 40])
self.assertAllEqual(anchor_stride, [10, 20])
self.assertAllEqual(base_anchor_size, [128, 512])
def test_build_ssd_anchor_generator_with_defaults(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator))
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, 0.2, 0.2),
(0.35, 0.418),
(0.499, 0.570),
(0.649, 0.721),
(0.799, 0.871),
(0.949, 0.974)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
[(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
with self.test_session() as sess:
base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
self.assertAllClose(base_anchor_size, [1.0, 1.0])
def test_build_ssd_anchor_generator_with_custom_scales(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
scales: [0.1, 0.15, 0.2, 0.4, 0.6, 0.8]
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator))
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, math.sqrt(0.1 * 0.15)),
(0.15, math.sqrt(0.15 * 0.2)),
(0.2, math.sqrt(0.2 * 0.4)),
(0.4, math.sqrt(0.4 * 0.6)),
(0.6, math.sqrt(0.6 * 0.8)),
(0.8, math.sqrt(0.8 * 1.0))]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
def test_build_ssd_anchor_generator_with_custom_interpolated_scale(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [0.5]
interpolated_scale_aspect_ratio: 0.5
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator))
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
6 * [(0.5, 0.5)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
def test_build_ssd_anchor_generator_without_reduced_boxes(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator))
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.2, 0.264),
(0.35, 0.418),
(0.499, 0.570),
(0.649, 0.721),
(0.799, 0.871),
(0.949, 0.974)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
6 * [(1.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
with self.test_session() as sess:
base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
self.assertAllClose(base_anchor_size, [1.0, 1.0])
def test_build_ssd_anchor_generator_with_non_default_parameters(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
num_layers: 2
min_scale: 0.3
max_scale: 0.8
aspect_ratios: [2.0]
height_stride: 16
height_stride: 32
width_stride: 20
width_stride: 30
height_offset: 8
height_offset: 16
width_offset: 0
width_offset: 10
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator))
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, 0.3, 0.3), (0.8, 0.894)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
[(1.0, 2.0, 0.5), (2.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
for actual_strides, expected_strides in zip(
list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]):
self.assert_almost_list_equal(expected_strides, actual_strides)
for actual_offsets, expected_offsets in zip(
list(anchor_generator_object._anchor_offsets), [(8, 0), (16, 10)]):
self.assert_almost_list_equal(expected_offsets, actual_offsets)
with self.test_session() as sess:
base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
self.assertAllClose(base_anchor_size, [1.0, 1.0])
def test_raise_value_error_on_empty_anchor_generator(self):
anchor_generator_text_proto = """
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
with self.assertRaises(ValueError):
anchor_generator_builder.build(anchor_generator_proto)
def test_build_multiscale_anchor_generator_custom_aspect_ratios(self):
anchor_generator_text_proto = """
multiscale_anchor_generator {
aspect_ratios: [1.0]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiscale_grid_anchor_generator.
MultiscaleGridAnchorGenerator))
for level, anchor_grid_info in zip(
range(3, 8), anchor_generator_object._anchor_grid_info):
self.assertEqual(set(anchor_grid_info.keys()), set(['level', 'info']))
self.assertEqual(level, anchor_grid_info['level'])
self.assertEqual(len(anchor_grid_info['info']), 4)
self.assertAllClose(anchor_grid_info['info'][0], [2**0, 2**0.5])
self.assertAllClose(anchor_grid_info['info'][1], [1.0])
self.assertAllClose(anchor_grid_info['info'][2],
[4.0 * 2**level, 4.0 * 2**level])
self.assertAllClose(anchor_grid_info['info'][3], [2**level, 2**level])
self.assertTrue(anchor_generator_object._normalize_coordinates)
def test_build_multiscale_anchor_generator_with_anchors_in_pixel_coordinates(
self):
anchor_generator_text_proto = """
multiscale_anchor_generator {
aspect_ratios: [1.0]
normalize_coordinates: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiscale_grid_anchor_generator.
MultiscaleGridAnchorGenerator))
self.assertFalse(anchor_generator_object._normalize_coordinates)
if __name__ == '__main__':
tf.test.main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection box coder from configuration."""
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import keypoint_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.box_coders import square_box_coder
from object_detection.protos import box_coder_pb2
def build(box_coder_config):
"""Builds a box coder object based on the box coder config.
Args:
box_coder_config: A box_coder.proto object containing the config for the
desired box coder.
Returns:
BoxCoder based on the config.
Raises:
ValueError: On empty box coder proto.
"""
if not isinstance(box_coder_config, box_coder_pb2.BoxCoder):
raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.')
if box_coder_config.WhichOneof('box_coder_oneof') == 'faster_rcnn_box_coder':
return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=[
box_coder_config.faster_rcnn_box_coder.y_scale,
box_coder_config.faster_rcnn_box_coder.x_scale,
box_coder_config.faster_rcnn_box_coder.height_scale,
box_coder_config.faster_rcnn_box_coder.width_scale
])
if box_coder_config.WhichOneof('box_coder_oneof') == 'keypoint_box_coder':
return keypoint_box_coder.KeypointBoxCoder(
box_coder_config.keypoint_box_coder.num_keypoints,
scale_factors=[
box_coder_config.keypoint_box_coder.y_scale,
box_coder_config.keypoint_box_coder.x_scale,
box_coder_config.keypoint_box_coder.height_scale,
box_coder_config.keypoint_box_coder.width_scale
])
if (box_coder_config.WhichOneof('box_coder_oneof') ==
'mean_stddev_box_coder'):
return mean_stddev_box_coder.MeanStddevBoxCoder(
stddev=box_coder_config.mean_stddev_box_coder.stddev)
if box_coder_config.WhichOneof('box_coder_oneof') == 'square_box_coder':
return square_box_coder.SquareBoxCoder(scale_factors=[
box_coder_config.square_box_coder.y_scale,
box_coder_config.square_box_coder.x_scale,
box_coder_config.square_box_coder.length_scale
])
raise ValueError('Empty box coder.')
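# A minimal usage sketch (mirroring the builder tests below):
#   from google.protobuf import text_format
#   proto = box_coder_pb2.BoxCoder()
#   text_format.Merge('faster_rcnn_box_coder { }', proto)
#   coder = build(proto)  # FasterRcnnBoxCoder with scale_factors
#                         # [10.0, 10.0, 5.0, 5.0] (the proto defaults)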
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_coder_builder."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import keypoint_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.box_coders import square_box_coder
from object_detection.builders import box_coder_builder
from object_detection.protos import box_coder_pb2
class BoxCoderBuilderTest(tf.test.TestCase):
def test_build_faster_rcnn_box_coder_with_defaults(self):
box_coder_text_proto = """
faster_rcnn_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object,
faster_rcnn_box_coder.FasterRcnnBoxCoder)
self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0])
def test_build_faster_rcnn_box_coder_with_non_default_parameters(self):
box_coder_text_proto = """
faster_rcnn_box_coder {
y_scale: 6.0
x_scale: 3.0
height_scale: 7.0
width_scale: 8.0
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object,
faster_rcnn_box_coder.FasterRcnnBoxCoder)
self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0])
def test_build_keypoint_box_coder_with_defaults(self):
box_coder_text_proto = """
keypoint_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder)
self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0])
def test_build_keypoint_box_coder_with_non_default_parameters(self):
box_coder_text_proto = """
keypoint_box_coder {
num_keypoints: 6
y_scale: 6.0
x_scale: 3.0
height_scale: 7.0
width_scale: 8.0
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder)
self.assertEqual(box_coder_object._num_keypoints, 6)
self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0])
def test_build_mean_stddev_box_coder(self):
box_coder_text_proto = """
mean_stddev_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertTrue(
isinstance(box_coder_object,
mean_stddev_box_coder.MeanStddevBoxCoder))
def test_build_square_box_coder_with_defaults(self):
box_coder_text_proto = """
square_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertTrue(
isinstance(box_coder_object, square_box_coder.SquareBoxCoder))
self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0])
def test_build_square_box_coder_with_non_default_parameters(self):
box_coder_text_proto = """
square_box_coder {
y_scale: 6.0
x_scale: 3.0
length_scale: 7.0
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertTrue(
isinstance(box_coder_object, square_box_coder.SquareBoxCoder))
self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0])
def test_raise_error_on_empty_box_coder(self):
box_coder_text_proto = """
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
with self.assertRaises(ValueError):
box_coder_builder.build(box_coder_proto)
if __name__ == '__main__':
tf.test.main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to build box predictor from configuration."""
from object_detection.core import box_predictor
from object_detection.protos import box_predictor_pb2
def build(argscope_fn, box_predictor_config, is_training, num_classes):
"""Builds box predictor based on the configuration.
Builds box predictor based on the configuration. See box_predictor.proto for
configurable options. Also, see box_predictor.py for more details.
Args:
argscope_fn: A function that takes the following inputs:
* hyperparams_pb2.Hyperparams proto
* a boolean indicating if the model is in training mode.
and returns a tf slim argscope for Conv and FC hyperparameters.
box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
configuration.
is_training: Whether the model is in training mode.
num_classes: Number of classes to predict.
Returns:
box_predictor: box_predictor.BoxPredictor object.
Raises:
ValueError: On unknown box predictor.
"""
if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
raise ValueError('box_predictor_config not of type '
'box_predictor_pb2.BoxPredictor.')
box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
if box_predictor_oneof == 'convolutional_box_predictor':
conv_box_predictor = box_predictor_config.convolutional_box_predictor
conv_hyperparams_fn = argscope_fn(conv_box_predictor.conv_hyperparams,
is_training)
box_predictor_object = box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
min_depth=conv_box_predictor.min_depth,
max_depth=conv_box_predictor.max_depth,
num_layers_before_predictor=(conv_box_predictor.
num_layers_before_predictor),
use_dropout=conv_box_predictor.use_dropout,
dropout_keep_prob=conv_box_predictor.dropout_keep_probability,
kernel_size=conv_box_predictor.kernel_size,
box_code_size=conv_box_predictor.box_code_size,
apply_sigmoid_to_scores=conv_box_predictor.apply_sigmoid_to_scores,
class_prediction_bias_init=(conv_box_predictor.
class_prediction_bias_init),
use_depthwise=conv_box_predictor.use_depthwise
)
return box_predictor_object
if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
conv_box_predictor = (box_predictor_config.
weight_shared_convolutional_box_predictor)
conv_hyperparams_fn = argscope_fn(conv_box_predictor.conv_hyperparams,
is_training)
box_predictor_object = box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
depth=conv_box_predictor.depth,
num_layers_before_predictor=(
conv_box_predictor.num_layers_before_predictor),
kernel_size=conv_box_predictor.kernel_size,
box_code_size=conv_box_predictor.box_code_size,
class_prediction_bias_init=conv_box_predictor.
class_prediction_bias_init,
use_dropout=conv_box_predictor.use_dropout,
dropout_keep_prob=conv_box_predictor.dropout_keep_probability)
return box_predictor_object
if box_predictor_oneof == 'mask_rcnn_box_predictor':
mask_rcnn_box_predictor = box_predictor_config.mask_rcnn_box_predictor
fc_hyperparams_fn = argscope_fn(mask_rcnn_box_predictor.fc_hyperparams,
is_training)
conv_hyperparams_fn = None
if mask_rcnn_box_predictor.HasField('conv_hyperparams'):
conv_hyperparams_fn = argscope_fn(
mask_rcnn_box_predictor.conv_hyperparams, is_training)
box_predictor_object = box_predictor.MaskRCNNBoxPredictor(
is_training=is_training,
num_classes=num_classes,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=mask_rcnn_box_predictor.use_dropout,
dropout_keep_prob=mask_rcnn_box_predictor.dropout_keep_probability,
box_code_size=mask_rcnn_box_predictor.box_code_size,
conv_hyperparams_fn=conv_hyperparams_fn,
predict_instance_masks=mask_rcnn_box_predictor.predict_instance_masks,
mask_height=mask_rcnn_box_predictor.mask_height,
mask_width=mask_rcnn_box_predictor.mask_width,
mask_prediction_num_conv_layers=(
mask_rcnn_box_predictor.mask_prediction_num_conv_layers),
mask_prediction_conv_depth=(
mask_rcnn_box_predictor.mask_prediction_conv_depth),
masks_are_class_agnostic=(
mask_rcnn_box_predictor.masks_are_class_agnostic),
predict_keypoints=mask_rcnn_box_predictor.predict_keypoints,
share_box_across_classes=(
mask_rcnn_box_predictor.share_box_across_classes))
return box_predictor_object
if box_predictor_oneof == 'rfcn_box_predictor':
rfcn_box_predictor = box_predictor_config.rfcn_box_predictor
conv_hyperparams_fn = argscope_fn(rfcn_box_predictor.conv_hyperparams,
is_training)
box_predictor_object = box_predictor.RfcnBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
crop_size=[rfcn_box_predictor.crop_height,
rfcn_box_predictor.crop_width],
num_spatial_bins=[rfcn_box_predictor.num_spatial_bins_height,
rfcn_box_predictor.num_spatial_bins_width],
depth=rfcn_box_predictor.depth,
box_code_size=rfcn_box_predictor.box_code_size)
return box_predictor_object
raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))
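# A minimal usage sketch (mirroring the builder tests below): argscope_fn is
# called with the hyperparams proto and the is_training flag; the tests simply
# echo both back.
#   def mock_argscope_fn(hyperparams_proto, is_training):
#     return (hyperparams_proto, is_training)
#   predictor = build(mock_argscope_fn, box_predictor_proto,
#                     is_training=True, num_classes=90)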
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_predictor_builder."""
import mock
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import box_predictor_pb2
from object_detection.protos import hyperparams_pb2
class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_construct_non_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
min_depth: 2
max_depth: 16
num_layers_before_predictor: 2
use_dropout: false
dropout_keep_probability: 0.4
kernel_size: 3
box_code_size: 3
apply_sigmoid_to_scores: true
class_prediction_bias_init: 4.0
use_depthwise: true
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
self.assertEqual(box_predictor._min_depth, 2)
self.assertEqual(box_predictor._max_depth, 16)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertFalse(box_predictor._use_dropout)
self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.4)
self.assertTrue(box_predictor._apply_sigmoid_to_scores)
self.assertAlmostEqual(box_predictor._class_prediction_bias_init, 4.0)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
self.assertTrue(box_predictor._use_depthwise)
def test_construct_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor._min_depth, 0)
self.assertEqual(box_predictor._max_depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertTrue(box_predictor._use_dropout)
self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.8)
self.assertFalse(box_predictor._apply_sigmoid_to_scores)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertFalse(box_predictor._use_depthwise)
class WeightSharedConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
(box_predictor_proto.weight_shared_convolutional_box_predictor
.conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_construct_non_default_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
depth: 2
num_layers_before_predictor: 2
kernel_size: 7
box_code_size: 3
class_prediction_bias_init: 4.0
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
(box_predictor_proto.weight_shared_convolutional_box_predictor.
conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
self.assertEqual(box_predictor._depth, 2)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertAlmostEqual(box_predictor._class_prediction_bias_init, 4.0)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
def test_construct_default_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor._depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_builder_calls_fc_argscope_fn(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
op: FC
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
mock_argscope_fn.assert_called_with(hyperparams_proto, False)
self.assertEqual(box_predictor._fc_hyperparams_fn, 'arg_scope')
def test_non_default_mask_rcnn_box_predictor(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
op: FC
"""
box_predictor_text_proto = """
mask_rcnn_box_predictor {
use_dropout: true
dropout_keep_probability: 0.8
box_code_size: 3
share_box_across_classes: true
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
def mock_fc_argscope_builder(fc_hyperparams_arg, is_training):
return (fc_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_fc_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertTrue(box_predictor._use_dropout)
self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.8)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 3)
self.assertEqual(box_predictor._share_box_across_classes, True)
def test_build_default_mask_rcnn_box_predictor(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor = box_predictor_builder.build(
argscope_fn=mock.Mock(return_value='arg_scope'),
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertFalse(box_predictor._use_dropout)
self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 4)
self.assertFalse(box_predictor._predict_instance_masks)
self.assertFalse(box_predictor._predict_keypoints)
def test_build_box_predictor_with_mask_branch(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
hyperparams_pb2.Hyperparams.CONV)
box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
box_predictor_proto.mask_rcnn_box_predictor.mask_height = 16
box_predictor_proto.mask_rcnn_box_predictor.mask_width = 16
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
mock_argscope_fn.assert_has_calls(
[mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
True),
mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
True)], any_order=True)
self.assertFalse(box_predictor._use_dropout)
self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 4)
self.assertTrue(box_predictor._predict_instance_masks)
self.assertEqual(box_predictor._mask_prediction_conv_depth, 512)
self.assertFalse(box_predictor._predict_keypoints)
class RfcnBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_fc_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_non_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
box_predictor_text_proto = """
rfcn_box_predictor {
num_spatial_bins_height: 4
num_spatial_bins_width: 4
depth: 4
box_code_size: 3
crop_height: 16
crop_width: 16
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 3)
self.assertEqual(box_predictor._num_spatial_bins, [4, 4])
self.assertEqual(box_predictor._crop_size, [16, 16])
def test_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 4)
self.assertEqual(box_predictor._num_spatial_bins, [3, 3])
self.assertEqual(box_predictor._crop_size, [12, 12])
if __name__ == '__main__':
tf.test.main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data.Dataset builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wish to use their own InputReaders with the Object Detection
configuration framework, they should define their own builder function that
wraps the build function.
"""
import functools
import tensorflow as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
def _get_padding_shapes(dataset, max_num_boxes=None, num_classes=None,
spatial_image_shape=None):
"""Returns shapes to pad dataset tensors to before batching.
Args:
dataset: tf.data.Dataset object.
max_num_boxes: Max number of groundtruth boxes needed to compute shapes for
padding.
num_classes: Number of classes in the dataset needed to compute shapes for
padding.
spatial_image_shape: A list of two integers of the form [height, width]
containing expected spatial shape of the image.
Returns:
A dictionary keyed by fields.InputDataFields containing padding shapes for
tensors in the dataset.
Raises:
ValueError: If groundtruth classes is neither rank 1 nor rank 2.
"""
if not spatial_image_shape or spatial_image_shape == [-1, -1]:
height, width = None, None
else:
height, width = spatial_image_shape # pylint: disable=unpacking-non-sequence
num_additional_channels = 0
if fields.InputDataFields.image_additional_channels in dataset.output_shapes:
num_additional_channels = dataset.output_shapes[
fields.InputDataFields.image_additional_channels].dims[2].value
padding_shapes = {
# Additional channels are merged before batching.
fields.InputDataFields.image: [
height, width, 3 + num_additional_channels
],
fields.InputDataFields.image_additional_channels: [
height, width, num_additional_channels
],
fields.InputDataFields.source_id: [],
fields.InputDataFields.filename: [],
fields.InputDataFields.key: [],
fields.InputDataFields.groundtruth_difficult: [max_num_boxes],
fields.InputDataFields.groundtruth_boxes: [max_num_boxes, 4],
fields.InputDataFields.groundtruth_instance_masks: [
max_num_boxes, height, width
],
fields.InputDataFields.groundtruth_is_crowd: [max_num_boxes],
fields.InputDataFields.groundtruth_group_of: [max_num_boxes],
fields.InputDataFields.groundtruth_area: [max_num_boxes],
fields.InputDataFields.groundtruth_weights: [max_num_boxes],
fields.InputDataFields.num_groundtruth_boxes: [],
fields.InputDataFields.groundtruth_label_types: [max_num_boxes],
fields.InputDataFields.groundtruth_label_scores: [max_num_boxes],
fields.InputDataFields.true_image_shape: [3],
fields.InputDataFields.multiclass_scores: [
max_num_boxes, num_classes + 1 if num_classes is not None else None
],
}
  # Determine whether groundtruth_classes are integers or one-hot encodings,
  # and choose the padding shape accordingly.
classes_shape = dataset.output_shapes[
fields.InputDataFields.groundtruth_classes]
if len(classes_shape) == 1: # Class integers.
padding_shapes[fields.InputDataFields.groundtruth_classes] = [max_num_boxes]
elif len(classes_shape) == 2: # One-hot or k-hot encoding.
padding_shapes[fields.InputDataFields.groundtruth_classes] = [
max_num_boxes, num_classes]
else:
raise ValueError('Groundtruth classes must be a rank 1 tensor (classes) or '
'rank 2 tensor (one-hot encodings)')
if fields.InputDataFields.original_image in dataset.output_shapes:
padding_shapes[fields.InputDataFields.original_image] = [
None, None, 3 + num_additional_channels
]
if fields.InputDataFields.groundtruth_keypoints in dataset.output_shapes:
tensor_shape = dataset.output_shapes[fields.InputDataFields.
groundtruth_keypoints]
padding_shape = [max_num_boxes, tensor_shape[1].value,
tensor_shape[2].value]
padding_shapes[fields.InputDataFields.groundtruth_keypoints] = padding_shape
if (fields.InputDataFields.groundtruth_keypoint_visibilities
in dataset.output_shapes):
tensor_shape = dataset.output_shapes[fields.InputDataFields.
groundtruth_keypoint_visibilities]
padding_shape = [max_num_boxes, tensor_shape[1].value]
padding_shapes[fields.InputDataFields.
groundtruth_keypoint_visibilities] = padding_shape
return {tensor_key: padding_shapes[tensor_key]
for tensor_key, _ in dataset.output_shapes.items()}
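# Illustrative sketch (not part of the original file): for a hypothetical call
# _get_padding_shapes(dataset, max_num_boxes=100, num_classes=90,
# spatial_image_shape=[300, 300]) with one-hot labels, the returned dictionary
# would include entries such as
#   fields.InputDataFields.image: [300, 300, 3]
#   fields.InputDataFields.groundtruth_boxes: [100, 4]
#   fields.InputDataFields.groundtruth_classes: [100, 90]
# Fields not present in dataset.output_shapes are dropped by the final dict
# comprehension.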
def build(input_reader_config,
transform_input_data_fn=None,
batch_size=None,
max_num_boxes=None,
num_classes=None,
spatial_image_shape=None,
num_additional_channels=0):
"""Builds a tf.data.Dataset.
  Builds a tf.data.Dataset by applying the `transform_input_data_fn` to all
  records. If `batch_size` is given, applies a padded batch to the resulting
  dataset.
Args:
    input_reader_config: An input_reader_pb2.InputReader object.
transform_input_data_fn: Function to apply to all records, or None if
no extra decoding is required.
batch_size: Batch size. If None, batching is not performed.
max_num_boxes: Max number of groundtruth boxes needed to compute shapes for
padding. If None, will use a dynamic shape.
num_classes: Number of classes in the dataset needed to compute shapes for
padding. If None, will use a dynamic shape.
spatial_image_shape: A list of two integers of the form [height, width]
containing expected spatial shape of the image after applying
transform_input_data_fn. If None, will use dynamic shapes.
num_additional_channels: Number of additional channels to use in the input.
Returns:
A tf.data.Dataset based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
config = input_reader_config.tf_record_input_reader
if not config.input_path:
raise ValueError('At least one input path must be specified in '
'`input_reader_config`.')
label_map_proto_file = None
if input_reader_config.HasField('label_map_path'):
label_map_proto_file = input_reader_config.label_map_path
decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=input_reader_config.load_instance_masks,
instance_mask_type=input_reader_config.mask_type,
label_map_proto_file=label_map_proto_file,
use_display_name=input_reader_config.use_display_name,
num_additional_channels=num_additional_channels)
def process_fn(value):
processed = decoder.decode(value)
if transform_input_data_fn is not None:
return transform_input_data_fn(processed)
return processed
dataset = dataset_util.read_dataset(
functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
process_fn, config.input_path[:], input_reader_config)
if batch_size:
padding_shapes = _get_padding_shapes(dataset, max_num_boxes, num_classes,
spatial_image_shape)
dataset = dataset.apply(
tf.contrib.data.padded_batch_and_drop_remainder(batch_size,
padding_shapes))
return dataset
raise ValueError('Unsupported input_reader_config.')
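# Hedged usage sketch (not part of the original file): builds a batched dataset
# from a hand-written InputReader text proto. The paths '/tmp/train.record' and
# '/tmp/label_map.pbtxt' are hypothetical placeholders, as are the padding
# parameters.
def _example_build_usage():
  """Sketch: build a padded, batched tf.data.Dataset from a text proto."""
  from google.protobuf import text_format  # Local import for the sketch only.
  input_reader_text_proto = """
    shuffle: false
    num_readers: 1
    label_map_path: '/tmp/label_map.pbtxt'
    tf_record_input_reader {
      input_path: '/tmp/train.record'
    }
  """
  input_reader_proto = input_reader_pb2.InputReader()
  text_format.Merge(input_reader_text_proto, input_reader_proto)
  # With batch_size set, every tensor is padded to a static shape (derived by
  # _get_padding_shapes above) before batching.
  return build(
      input_reader_proto,
      batch_size=2,
      max_num_boxes=100,
      num_classes=90,
      spatial_image_shape=[300, 300])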
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset_builder."""
import os
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from object_detection.builders import dataset_builder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
class DatasetBuilderTest(tf.test.TestCase):
def create_tf_record(self, has_additional_channels=False):
path = os.path.join(self.get_temp_dir(), 'tfrecord')
writer = tf.python_io.TFRecordWriter(path)
image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
additional_channels_tensor = np.random.randint(
255, size=(4, 5, 1)).astype(np.uint8)
flat_mask = (4 * 5) * [1.0]
with self.test_session():
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
encoded_additional_channels_jpeg = tf.image.encode_jpeg(
tf.constant(additional_channels_tensor)).eval()
features = {
'image/encoded':
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(value=[encoded_jpeg])),
'image/format':
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(value=['jpeg'.encode('utf-8')])
),
'image/height':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[4])),
'image/width':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[5])),
'image/object/bbox/xmin':
feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[0.0])),
'image/object/bbox/xmax':
feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[1.0])),
'image/object/bbox/ymin':
feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[0.0])),
'image/object/bbox/ymax':
feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[1.0])),
'image/object/class/label':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[2])),
'image/object/mask':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=flat_mask)),
}
if has_additional_channels:
features['image/additional_channels/encoded'] = feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[encoded_additional_channels_jpeg] * 2))
example = example_pb2.Example(
features=feature_pb2.Features(feature=features))
writer.write(example.SerializeToString())
writer.close()
return path
def test_build_tf_record_input_reader(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
tensor_dict = dataset_util.make_initializable_iterator(
dataset_builder.build(input_reader_proto, batch_size=1)).get_next()
sv = tf.train.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
output_dict = sess.run(tensor_dict)
self.assertTrue(
fields.InputDataFields.groundtruth_instance_masks not in output_dict)
    self.assertEqual((1, 4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
self.assertAllEqual([[2]],
output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
(1, 1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual(
[0.0, 0.0, 1.0, 1.0],
output_dict[fields.InputDataFields.groundtruth_boxes][0][0])
def test_build_tf_record_input_reader_and_load_instance_masks(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
tensor_dict = dataset_util.make_initializable_iterator(
dataset_builder.build(input_reader_proto, batch_size=1)).get_next()
sv = tf.train.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
output_dict = sess.run(tensor_dict)
self.assertAllEqual(
(1, 1, 4, 5),
output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)
def test_build_tf_record_input_reader_with_batch_size_two(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def one_hot_class_encoding_fn(tensor_dict):
tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
return tensor_dict
tensor_dict = dataset_util.make_initializable_iterator(
dataset_builder.build(
input_reader_proto,
transform_input_data_fn=one_hot_class_encoding_fn,
batch_size=2,
max_num_boxes=2,
num_classes=3,
spatial_image_shape=[4, 5])).get_next()
sv = tf.train.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
output_dict = sess.run(tensor_dict)
self.assertAllEqual([2, 4, 5, 3],
output_dict[fields.InputDataFields.image].shape)
self.assertAllEqual([2, 2, 3],
output_dict[fields.InputDataFields.groundtruth_classes].
shape)
self.assertAllEqual([2, 2, 4],
output_dict[fields.InputDataFields.groundtruth_boxes].
shape)
self.assertAllEqual(
[[[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]],
output_dict[fields.InputDataFields.groundtruth_boxes])
def test_build_tf_record_input_reader_with_batch_size_two_and_masks(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def one_hot_class_encoding_fn(tensor_dict):
tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
return tensor_dict
tensor_dict = dataset_util.make_initializable_iterator(
dataset_builder.build(
input_reader_proto,
transform_input_data_fn=one_hot_class_encoding_fn,
batch_size=2,
max_num_boxes=2,
num_classes=3,
spatial_image_shape=[4, 5])).get_next()
sv = tf.train.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
output_dict = sess.run(tensor_dict)
self.assertAllEqual(
[2, 2, 4, 5],
output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)
def test_build_tf_record_input_reader_with_additional_channels(self):
tf_record_path = self.create_tf_record(has_additional_channels=True)
input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
tensor_dict = dataset_util.make_initializable_iterator(
dataset_builder.build(
input_reader_proto, batch_size=2,
num_additional_channels=2)).get_next()
sv = tf.train.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
output_dict = sess.run(tensor_dict)
    self.assertEqual((2, 4, 5, 5),
                     output_dict[fields.InputDataFields.image].shape)
def test_raises_error_with_no_input_paths(self):
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
"""
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
with self.assertRaises(ValueError):
dataset_builder.build(input_reader_proto)
if __name__ == '__main__':
tf.test.main()
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for quantized training and evaluation."""
import tensorflow as tf
def build(graph_rewriter_config, is_training):
"""Returns a function that modifies default graph based on options.
Args:
graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto.
is_training: whether in training of eval mode.
"""
def graph_rewrite_fn():
"""Function to quantize weights and activation of the default graph."""
if (graph_rewriter_config.quantization.weight_bits != 8 or
graph_rewriter_config.quantization.activation_bits != 8):
raise ValueError('Only 8bit quantization is supported')
# Quantize the graph by inserting quantize ops for weights and activations
if is_training:
tf.contrib.quantize.create_training_graph(
input_graph=tf.get_default_graph(),
quant_delay=graph_rewriter_config.quantization.delay)
else:
tf.contrib.quantize.create_eval_graph(input_graph=tf.get_default_graph())
tf.contrib.layers.summarize_collection('quant_vars')
return graph_rewrite_fn
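# Hedged usage sketch (not part of the original file): the delay value below is
# an arbitrary placeholder. Only 8-bit quantization is accepted by build().
def _example_graph_rewrite():
  """Sketch: rewrite the default graph with fake-quantization ops."""
  from object_detection.protos import graph_rewriter_pb2  # Sketch-only import.
  graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
  graph_rewriter_proto.quantization.delay = 2000
  graph_rewriter_proto.quantization.weight_bits = 8
  graph_rewriter_proto.quantization.activation_bits = 8
  graph_rewrite_fn = build(graph_rewriter_proto, is_training=True)
  # Inserts quantization ops into whatever graph is currently the default and
  # adds summaries for the collected quantization variables.
  graph_rewrite_fn()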
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_rewriter_builder."""
import mock
import tensorflow as tf
from object_detection.builders import graph_rewriter_builder
from object_detection.protos import graph_rewriter_pb2
class QuantizationBuilderTest(tf.test.TestCase):
def testQuantizationBuilderSetsUpCorrectTrainArguments(self):
with mock.patch.object(
tf.contrib.quantize, 'create_training_graph') as mock_quant_fn:
with mock.patch.object(tf.contrib.layers,
'summarize_collection') as mock_summarize_col:
graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_proto.quantization.delay = 10
graph_rewriter_proto.quantization.weight_bits = 8
graph_rewriter_proto.quantization.activation_bits = 8
graph_rewrite_fn = graph_rewriter_builder.build(
graph_rewriter_proto, is_training=True)
graph_rewrite_fn()
_, kwargs = mock_quant_fn.call_args
self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
self.assertEqual(kwargs['quant_delay'], 10)
mock_summarize_col.assert_called_with('quant_vars')
def testQuantizationBuilderSetsUpCorrectEvalArguments(self):
with mock.patch.object(tf.contrib.quantize,
'create_eval_graph') as mock_quant_fn:
with mock.patch.object(tf.contrib.layers,
'summarize_collection') as mock_summarize_col:
graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_proto.quantization.delay = 10
graph_rewrite_fn = graph_rewriter_builder.build(
graph_rewriter_proto, is_training=False)
graph_rewrite_fn()
_, kwargs = mock_quant_fn.call_args
self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
mock_summarize_col.assert_called_with('quant_vars')
if __name__ == '__main__':
tf.test.main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function to construct tf-slim arg_scope for convolution, fc ops."""
import tensorflow as tf
from object_detection.protos import hyperparams_pb2
from object_detection.utils import context_manager
slim = tf.contrib.slim
def build(hyperparams_config, is_training):
"""Builds tf-slim arg_scope for convolution ops based on the config.
Returns an arg_scope to use for convolution ops containing weights
initializer, weights regularizer, activation function, batch norm function
and batch norm parameters based on the configuration.
  Note that if the batch_norm parameters are not specified in the config
  (i.e. left to default) then batch norm is excluded from the arg_scope.
  The batch norm parameters are set for updates based on the `is_training`
  argument and the conv_hyperparams_config.batch_norm.train parameter. During
  training, they are updated only if the batch_norm.train parameter is true.
  However, during eval, no updates are made to the batch norm variables. In
  both cases, their current values are used during the forward pass.
Args:
    hyperparams_config: A hyperparams_pb2.Hyperparams proto containing
      hyperparameters.
is_training: Whether the network is in training mode.
Returns:
arg_scope_fn: A function to construct tf-slim arg_scope containing
hyperparameters for ops.
Raises:
    ValueError: If hyperparams_config is not of type
      hyperparams_pb2.Hyperparams.
"""
if not isinstance(hyperparams_config,
hyperparams_pb2.Hyperparams):
    raise ValueError('hyperparams_config not of type '
                     'hyperparams_pb2.Hyperparams.')
batch_norm = None
batch_norm_params = None
if hyperparams_config.HasField('batch_norm'):
batch_norm = slim.batch_norm
batch_norm_params = _build_batch_norm_params(
hyperparams_config.batch_norm, is_training)
affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose]
if hyperparams_config.HasField('op') and (
hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
affected_ops = [slim.fully_connected]
def scope_fn():
with (slim.arg_scope([slim.batch_norm], **batch_norm_params)
if batch_norm_params is not None else
context_manager.IdentityContextManager()):
with slim.arg_scope(
affected_ops,
weights_regularizer=_build_regularizer(
hyperparams_config.regularizer),
weights_initializer=_build_initializer(
hyperparams_config.initializer),
activation_fn=_build_activation_fn(hyperparams_config.activation),
normalizer_fn=batch_norm) as sc:
return sc
return scope_fn
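# Hedged usage sketch (not part of the original file): the text proto and layer
# shapes below are arbitrary placeholders.
def _example_scope_usage():
  """Sketch: build an arg_scope from a text proto and create a conv layer."""
  from google.protobuf import text_format  # Local import for the sketch only.
  hyperparams_text_proto = """
    regularizer {
      l2_regularizer {
        weight: 0.0004
      }
    }
    initializer {
      truncated_normal_initializer {
        stddev: 0.03
      }
    }
    activation: RELU_6
  """
  hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(hyperparams_text_proto, hyperparams_proto)
  scope_fn = build(hyperparams_proto, is_training=True)
  inputs = tf.placeholder(tf.float32, shape=[1, 64, 64, 3])
  # Conv ops created inside the scope pick up the configured regularizer,
  # initializer and activation function.
  with slim.arg_scope(scope_fn()):
    return slim.conv2d(inputs, 32, [3, 3])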
def _build_activation_fn(activation_fn):
"""Builds a callable activation from config.
Args:
activation_fn: hyperparams_pb2.Hyperparams.activation
Returns:
Callable activation function.
Raises:
ValueError: On unknown activation function.
"""
if activation_fn == hyperparams_pb2.Hyperparams.NONE:
return None
if activation_fn == hyperparams_pb2.Hyperparams.RELU:
return tf.nn.relu
if activation_fn == hyperparams_pb2.Hyperparams.RELU_6:
return tf.nn.relu6
raise ValueError('Unknown activation function: {}'.format(activation_fn))
def _build_regularizer(regularizer):
"""Builds a tf-slim regularizer from config.
Args:
regularizer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
tf-slim regularizer.
Raises:
ValueError: On unknown regularizer.
"""
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if regularizer_oneof == 'l1_regularizer':
return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
if regularizer_oneof == 'l2_regularizer':
return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
def _build_initializer(initializer):
"""Build a tf initializer from config.
Args:
initializer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
tf initializer.
Raises:
ValueError: On unknown initializer.
"""
initializer_oneof = initializer.WhichOneof('initializer_oneof')
if initializer_oneof == 'truncated_normal_initializer':
return tf.truncated_normal_initializer(
mean=initializer.truncated_normal_initializer.mean,
stddev=initializer.truncated_normal_initializer.stddev)
if initializer_oneof == 'random_normal_initializer':
return tf.random_normal_initializer(
mean=initializer.random_normal_initializer.mean,
stddev=initializer.random_normal_initializer.stddev)
if initializer_oneof == 'variance_scaling_initializer':
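    # The proto stores the mode as an enum integer; convert it back to its
    # string name (e.g. 'FAN_IN') because slim.variance_scaling_initializer
    # expects the mode as a string.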
enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
DESCRIPTOR.enum_types_by_name['Mode'])
mode = enum_descriptor.values_by_number[initializer.
variance_scaling_initializer.
mode].name
return slim.variance_scaling_initializer(
factor=initializer.variance_scaling_initializer.factor,
mode=mode,
uniform=initializer.variance_scaling_initializer.uniform)
raise ValueError('Unknown initializer function: {}'.format(
initializer_oneof))
def _build_batch_norm_params(batch_norm, is_training):
"""Build a dictionary of batch_norm params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
is_training: Whether the models is in training mode.
Returns:
A dictionary containing batch_norm parameters.
"""
batch_norm_params = {
'decay': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
# Remove is_training parameter from here and deprecate it in the proto
# once we refactor Faster RCNN models to set is_training through an outer
# arg_scope in the meta architecture.
'is_training': is_training and batch_norm.train,
}
return batch_norm_params
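# Illustrative note (not part of the original file): with is_training=True but
# batch_norm.train set to false in the config, the dictionary above carries
# 'is_training': False, so the batch norm moving averages stay frozen even
# while the rest of the model trains.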