"vscode:/vscode.git/clone" did not exist on "07f93888739b6c428c64b46be8352beb0ff8a3e3"
Commit 78d5f8f8 authored by Zhichao Lu, committed by lzc5123016

Merged commit includes the following changes:

187187978  by Zhichao Lu:

    Only updating hyperparameters if they have non-null values.

--
187097690  by Zhichao Lu:

    Rewrite some conditions a bit more clearly.

--
187085190  by Zhichao Lu:

    More informative error message.

--
186935376  by Zhichao Lu:

    Added option to evaluator.evaluate to use custom evaluator objects.

--
186808249  by Zhichao Lu:

    Fix documentation re: number of stages.

--
186775014  by Zhichao Lu:

    Change anchor generator interface to return a list of BoxLists containing anchors for different feature map layers.

--
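As a short, hypothetical sketch of what consuming the new return type described in 186775014 above might look like (the helper name below is illustrative, not part of the API): anchors now come back as a list of BoxLists, one per feature map layer, so per-layer bookkeeping becomes a plain list operation.

def count_anchors_per_layer(anchor_boxlists):
  # anchor_boxlists: a list of BoxList objects, one per feature map layer, as
  # described in the change above. BoxList.num_boxes() reports how many anchors
  # that single layer contributes.
  return [boxlist.num_boxes() for boxlist in anchor_boxlists]
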
186729028  by Zhichao Lu:

    Minor fixes to object detection.

--
186723716  by Zhichao Lu:

    Fix tf_example_decoder.py initialization issue.

--
186668505  by Zhichao Lu:

    Remove unused import.

--
186475361  by Zhichao Lu:

    Update the box predictor interface to return a list of predictions - one from each feature map - instead of stacking them into one large tensor.

--
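As a hedged illustration of the box predictor change in 186475361 above (tensor names and shapes here are assumptions, not the actual interface): callers that still want the old single stacked tensor can concatenate the per-feature-map list along the anchor dimension.

import tensorflow as tf

def stack_box_encodings(box_encodings_list):
  # box_encodings_list is assumed to be a list of [batch, num_anchors_i, code_size]
  # tensors, one per feature map; concatenating along axis 1 recovers a single
  # [batch, total_num_anchors, code_size] tensor like the previous interface produced.
  return tf.concat(box_encodings_list, axis=1)
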
186410844  by Zhich...
parent 629adffa
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for dealing with writing json strings. """Utilities for dealing with writing json strings.
json_utils wraps json.dump and json.dumps so that they can be used to safely json_utils wraps json.dump and json.dumps so that they can be used to safely
......
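The json_utils docstring is truncated in this excerpt. As a rough sketch of how such a wrapper can work, assuming its purpose is to keep json.dumps usable while enforcing a consistent float precision (the float_digits parameter and the helper below are assumptions, not the module's confirmed API):

import json

def Dumps(obj, float_digits=-1, **params):
  # Sketch only: round floats to a fixed precision before delegating to
  # json.dumps; float_digits < 0 leaves floats untouched.
  if float_digits >= 0:
    obj = _round_floats(obj, float_digits)
  return json.dumps(obj, **params)

def _round_floats(obj, digits):
  # Recursively round every float inside nested dicts, lists, and tuples.
  if isinstance(obj, float):
    return round(obj, digits)
  if isinstance(obj, dict):
    return {k: _round_floats(v, digits) for k, v in obj.items()}
  if isinstance(obj, (list, tuple)):
    return type(obj)(_round_floats(v, digits) for v in obj)
  return obj
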
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.image.understanding.object_detection.utils.json_utils.""" """Tests for google3.image.understanding.object_detection.utils.json_utils."""
import os import os
......
@@ -142,6 +142,7 @@ def manual_stepping(global_step, boundaries, rates):
  if len(rates) != len(boundaries) + 1:
    raise ValueError('Number of provided learning rates must exceed '
                     'number of boundary points by exactly 1.')
  if not boundaries: return tf.constant(rates[0])
  step_boundaries = tf.constant(boundaries, tf.int32)
  num_boundaries = len(boundaries)
  learning_rates = tf.constant(rates, tf.float32)
......
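For context, a usage sketch of manual_stepping consistent with the hunk above: the number of rates must exceed the number of boundaries by exactly one, and the newly added line makes the zero-boundary case return a constant rate. The specific boundary and rate values below are illustrative.

import tensorflow as tf
from object_detection.utils import learning_schedules

global_step = tf.train.get_or_create_global_step()

# Piecewise-constant schedule: 0.1 before step 10, 0.01 for steps [10, 20),
# and 0.001 from step 20 onwards (three rates for two boundaries).
learning_rate = learning_schedules.manual_stepping(
    global_step, boundaries=[10, 20], rates=[0.1, 0.01, 0.001])

# With no boundaries the schedule is constant; the added early return handles
# this by producing tf.constant(rates[0]) directly.
constant_rate = learning_schedules.manual_stepping(
    global_step, boundaries=[], rates=[0.1])
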
@@ -75,5 +75,21 @@ class LearningSchedulesTest(test_case.TestCase):
    exp_rates = [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]
    self.assertAllClose(output_rates, exp_rates)

  def testManualSteppingWithZeroBoundaries(self):

    def graph_fn(global_step):
      boundaries = []
      rates = [0.01]
      learning_rate = learning_schedules.manual_stepping(
          global_step, boundaries, rates)
      return (learning_rate,)

    output_rates = [
        self.execute(graph_fn, [np.array(i).astype(np.int64)])
        for i in range(4)
    ]
    exp_rates = [0.01] * 4
    self.assertAllClose(output_rates, exp_rates)
if __name__ == '__main__':
  tf.test.main()
@@ -19,7 +19,6 @@ Example box operations that are supported:
  * Areas: compute bounding box areas
  * IOU: pairwise intersection-over-union scores
"""
import numpy as np
from object_detection.utils import np_box_list
......
@@ -19,7 +19,6 @@ Example box operations that are supported:
  * Areas: compute bounding box areas
  * IOU: pairwise intersection-over-union scores
"""
import numpy as np
from object_detection.utils import np_box_list_ops
......
@@ -224,7 +224,7 @@ def padded_one_hot_encoding(indices, depth, left_pad):
    ValueError: if `indices` does not have rank 1 or if `left_pad` or `depth` are
      either negative or non-integers.
-  TODO: add runtime checks for depth and indices.
+  TODO(rathodv): add runtime checks for depth and indices.
  """
  if depth < 0 or not isinstance(depth, six.integer_types):
    raise ValueError('`depth` must be a non-negative integer.')
@@ -474,7 +474,7 @@ def normalize_to_target(inputs,
  Note that the rank of `inputs` must be known and the dimension to which
  normalization is to be applied should be statically defined.
-  TODO: Add option to scale by L2 norm of the entire input.
+  TODO(jonathanhuang): Add option to scale by L2 norm of the entire input.
  Args:
    inputs: A `Tensor` of arbitrary size.
@@ -704,7 +704,7 @@ def reframe_box_masks_to_image_masks(box_masks, boxes, image_height,
  Returns:
    A tf.float32 tensor of size [num_masks, image_height, image_width].
  """
-  # TODO: Make this a public function.
+  # TODO(rathodv): Make this a public function.
  def transform_boxes_relative_to_boxes(boxes, reference_boxes):
    boxes = tf.reshape(boxes, [-1, 2, 2])
    min_corner = tf.expand_dims(reference_boxes[:, 0:2], 1)
......
@@ -152,7 +152,7 @@ def static_or_dynamic_map_fn(fn, elems, dtype=None,
  Tensors or lists of Tensors). Likewise, the output of `fn` can only be a
  Tensor or list of Tensors.
-  TODO: make this function fully interchangeable with tf.map_fn.
+  TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.
  Args:
    fn: The callable to be performed. It accepts one argument, which will have
......
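A minimal sketch of the dispatch the function's name implies (an assumption about its logic, not a copy of it): unroll the map in Python when the leading dimension of `elems` is statically known, otherwise fall back to tf.map_fn.

import tensorflow as tf

def static_or_dynamic_map_sketch(fn, elems, dtype=None):
  shape = elems.get_shape()
  leading_dim = shape[0].value if shape.ndims else None
  if leading_dim is not None:
    # Static leading dimension: build one op per slice and stack the results,
    # avoiding the tf.while_loop that tf.map_fn would create.
    return tf.stack([fn(element) for element in tf.unstack(elems)])
  return tf.map_fn(fn, elems, dtype=dtype)
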
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A convenience wrapper around tf.test.TestCase to enable TPU tests.""" """A convenience wrapper around tf.test.TestCase to enable TPU tests."""
import tensorflow as tf import tensorflow as tf
......
@@ -23,7 +23,7 @@ import tensorflow as tf
slim = tf.contrib.slim
-# TODO: Consider replacing with tf.contrib.filter_variables in
+# TODO(derekjchow): Consider replacing with tf.contrib.filter_variables in
# tensorflow/contrib/framework/python/ops/variables.py
def filter_variables(variables, filter_regex_list, invert=False):
  """Filters out the variables matching the filter_regex.
@@ -104,7 +104,7 @@ def get_variables_available_in_checkpoint(variables,
  Inspects given checkpoint and returns the subset of variables that are
  available in it.
-  TODO: force input and output to be a dictionary.
+  TODO(rathodv): force input and output to be a dictionary.
  Args:
    variables: a list or dictionary of variables to find in checkpoint.
......
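A hedged sketch of the core idea (not the helper's actual code): read the checkpoint's variable-name-to-shape map and keep only the variables whose names appear in it. The list-only input here is a simplification; the real helper also accepts a dictionary, per the TODO above.

import tensorflow as tf

def variables_available_sketch(variables, checkpoint_path):
  reader = tf.train.NewCheckpointReader(checkpoint_path)
  ckpt_variable_names = set(reader.get_variable_to_shape_map())
  # Keep a variable only if a tensor with the same name exists in the checkpoint.
  return [v for v in variables if v.op.name in ckpt_variable_names]
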
@@ -13,12 +13,7 @@
# limitations under the License.
# ==============================================================================
-"""Tests for image.understanding.object_detection.core.visualization_utils.
-Testing with visualization in the following colab:
-https://drive.google.com/a/google.com/file/d/0B5HnKS_hMsNARERpU3MtU3I5RFE/view?usp=sharing
-"""
+"""Tests for image.understanding.object_detection.core.visualization_utils."""
import logging
import os
......