Unverified Commit be9b8025 authored by Jonathan Huang, committed by GitHub

Merge pull request #3380 from pkulzc/master

Internal changes for object detection.
parents d3143cbc c173234f
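
The diff below updates the object detection utilities and their tests: the learning-schedule tests are rewritten on top of the shared test_case.TestCase harness, matmul_gather_on_zeroth_axis is reworked to support inputs whose shapes are only known at run time (with a new test covering that case), and several Python 3 / newer-TensorFlow compatibility fixes are applied (xrange to range, the print statement to logging.info, keep_dims to keepdims).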
@@ -14,65 +14,66 @@
 # ==============================================================================
 
 """Tests for object_detection.utils.learning_schedules."""
+import numpy as np
 import tensorflow as tf
 
 from object_detection.utils import learning_schedules
+from object_detection.utils import test_case
 
 
-class LearningSchedulesTest(tf.test.TestCase):
+class LearningSchedulesTest(test_case.TestCase):
 
   def testExponentialDecayWithBurnin(self):
-    global_step = tf.placeholder(tf.int32, [])
-    learning_rate_base = 1.0
-    learning_rate_decay_steps = 3
-    learning_rate_decay_factor = .1
-    burnin_learning_rate = .5
-    burnin_steps = 2
-    exp_rates = [.5, .5, 1, .1, .1, .1, .01, .01]
-    learning_rate = learning_schedules.exponential_decay_with_burnin(
-        global_step, learning_rate_base, learning_rate_decay_steps,
-        learning_rate_decay_factor, burnin_learning_rate, burnin_steps)
-    with self.test_session() as sess:
-      output_rates = []
-      for input_global_step in range(8):
-        output_rate = sess.run(learning_rate,
-                               feed_dict={global_step: input_global_step})
-        output_rates.append(output_rate)
-      self.assertAllClose(output_rates, exp_rates)
+    def graph_fn(global_step):
+      learning_rate_base = 1.0
+      learning_rate_decay_steps = 3
+      learning_rate_decay_factor = .1
+      burnin_learning_rate = .5
+      burnin_steps = 2
+      learning_rate = learning_schedules.exponential_decay_with_burnin(
+          global_step, learning_rate_base, learning_rate_decay_steps,
+          learning_rate_decay_factor, burnin_learning_rate, burnin_steps)
+      return (learning_rate,)
+    output_rates = [
+        self.execute(graph_fn, [np.array(i).astype(np.int64)]) for i in range(8)
+    ]
+    exp_rates = [.5, .5, 1, .1, .1, .1, .01, .01]
+    self.assertAllClose(output_rates, exp_rates, rtol=1e-4)
 
   def testCosineDecayWithWarmup(self):
-    global_step = tf.placeholder(tf.int32, [])
-    learning_rate_base = 1.0
-    total_steps = 100
-    warmup_learning_rate = 0.1
-    warmup_steps = 9
-    input_global_steps = [0, 4, 8, 9, 100]
-    exp_rates = [0.1, 0.5, 0.9, 1.0, 0]
-    learning_rate = learning_schedules.cosine_decay_with_warmup(
-        global_step, learning_rate_base, total_steps,
-        warmup_learning_rate, warmup_steps)
-    with self.test_session() as sess:
-      output_rates = []
-      for input_global_step in input_global_steps:
-        output_rate = sess.run(learning_rate,
-                               feed_dict={global_step: input_global_step})
-        output_rates.append(output_rate)
-      self.assertAllClose(output_rates, exp_rates)
+    def graph_fn(global_step):
+      learning_rate_base = 1.0
+      total_steps = 100
+      warmup_learning_rate = 0.1
+      warmup_steps = 9
+      learning_rate = learning_schedules.cosine_decay_with_warmup(
+          global_step, learning_rate_base, total_steps,
+          warmup_learning_rate, warmup_steps)
+      return (learning_rate,)
+    exp_rates = [0.1, 0.5, 0.9, 1.0, 0]
+    input_global_steps = [0, 4, 8, 9, 100]
+    output_rates = [
+        self.execute(graph_fn, [np.array(step).astype(np.int64)])
+        for step in input_global_steps
+    ]
+    self.assertAllClose(output_rates, exp_rates)
 
   def testManualStepping(self):
-    global_step = tf.placeholder(tf.int64, [])
-    boundaries = [2, 3, 7]
-    rates = [1.0, 2.0, 3.0, 4.0]
-    exp_rates = [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]
-    learning_rate = learning_schedules.manual_stepping(global_step, boundaries,
-                                                       rates)
-    with self.test_session() as sess:
-      output_rates = []
-      for input_global_step in range(10):
-        output_rate = sess.run(learning_rate,
-                               feed_dict={global_step: input_global_step})
-        output_rates.append(output_rate)
-      self.assertAllClose(output_rates, exp_rates)
+    def graph_fn(global_step):
+      boundaries = [2, 3, 7]
+      rates = [1.0, 2.0, 3.0, 4.0]
+      learning_rate = learning_schedules.manual_stepping(
+          global_step, boundaries, rates)
+      return (learning_rate,)
+    output_rates = [
+        self.execute(graph_fn, [np.array(i).astype(np.int64)])
+        for i in range(10)
+    ]
+    exp_rates = [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]
+    self.assertAllClose(output_rates, exp_rates)
 
 
 if __name__ == '__main__':
   tf.test.main()
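
As used in the rewritten tests above, test_case.TestCase.execute takes a graph-building function plus a list of NumPy inputs and returns the evaluated NumPy outputs. The following is only a rough sketch of that pattern in plain TensorFlow 1.x, not the actual helper (whose behavior may differ in details); it is included to make the new test structure easier to read:

import numpy as np
import tensorflow as tf

def execute(graph_fn, inputs):
  # Build placeholders matching the NumPy inputs, call graph_fn on them,
  # and evaluate the returned tensors with those inputs fed in.
  with tf.Graph().as_default():
    placeholders = [tf.placeholder(arr.dtype, arr.shape) for arr in inputs]
    outputs = graph_fn(*placeholders)
    with tf.Session() as sess:
      return sess.run(outputs, feed_dict=dict(zip(placeholders, inputs)))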
@@ -214,7 +214,7 @@ def non_max_suppression(boxlist,
   is_index_valid = np.full(num_boxes, 1, dtype=bool)
   selected_indices = []
   num_output = 0
-  for i in xrange(num_boxes):
+  for i in range(num_boxes):
     if num_output < max_output_size:
       if is_index_valid[i]:
         num_output += 1
@@ -217,7 +217,7 @@ def non_max_suppression(box_mask_list,
   is_index_valid = np.full(num_masks, 1, dtype=bool)
   selected_indices = []
   num_output = 0
-  for i in xrange(num_masks):
+  for i in range(num_masks):
     if num_output < max_output_size:
       if is_index_valid[i]:
         num_output += 1
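
Both loops above sit in the pure-NumPy non-max-suppression implementations. xrange no longer exists in Python 3, and range gives equivalent lazy iteration there, so the switch keeps the code running under both Python versions; under Python 2, range simply materializes a small list, which is harmless at these sizes.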
@@ -813,9 +813,10 @@ def matmul_gather_on_zeroth_axis(params, indices, scope=None):
     from indices given by indices, with shape indices.shape + params.shape[1:].
   """
   with tf.name_scope(scope, 'MatMulGather'):
-    index_range = params.shape[0]
-    params2d = tf.reshape(params, [index_range, -1])
-    indicator_matrix = tf.one_hot(indices, index_range)
+    params_shape = shape_utils.combined_static_and_dynamic_shape(params)
+    indices_shape = shape_utils.combined_static_and_dynamic_shape(indices)
+    params2d = tf.reshape(params, [params_shape[0], -1])
+    indicator_matrix = tf.one_hot(indices, params_shape[0])
     gathered_result_flattened = tf.matmul(indicator_matrix, params2d)
     return tf.reshape(gathered_result_flattened,
-                      indices.shape.concatenate(params.shape[1:]))
+                      tf.stack(indices_shape + params_shape[1:]))
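
The one-hot/matmul construction above implements a gather along the zeroth axis: row i of the indicator matrix selects params[indices[i]] from the flattened params, and the final reshape restores the trailing dimensions. A small NumPy sketch of that equivalence, using made-up values purely for illustration:

import numpy as np

params = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32)
indices = np.array([2, 0, 2])

# Rows of the identity matrix picked by `indices` form the one-hot indicator.
indicator = np.eye(params.shape[0], dtype=np.float32)[indices]
params2d = params.reshape(params.shape[0], -1)
gathered = indicator.dot(params2d).reshape(indices.shape + params.shape[1:])

assert np.allclose(gathered, params[indices])  # matches a plain gather

Reading the shapes through shape_utils.combined_static_and_dynamic_shape rather than params.shape lets the same construction work when the leading dimension is only known at run time.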
@@ -840,7 +840,7 @@ class OpsTestPositionSensitiveCropRegions(tf.test.TestCase):
     # All channels are equal so position-sensitive crop and resize should
     # work as the usual crop and resize for just one channel.
     crop = tf.image.crop_and_resize(image, boxes, box_ind, crop_size)
-    crop_and_pool = tf.reduce_mean(crop, [1, 2], keep_dims=True)
+    crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
 
     ps_crop_and_pool = ops.position_sensitive_crop_regions(
         tiled_image,
@@ -866,7 +866,7 @@ class OpsTestPositionSensitiveCropRegions(tf.test.TestCase):
     # When a single bin is used, position-sensitive crop and pool should be
     # the same as non-position sensitive crop and pool.
     crop = tf.image.crop_and_resize(image, boxes, box_ind, crop_size)
-    crop_and_pool = tf.reduce_mean(crop, [1, 2], keep_dims=True)
+    crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
 
     ps_crop_and_pool = ops.position_sensitive_crop_regions(
         image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=True)
@@ -1054,7 +1054,7 @@ class OpsTestPositionSensitiveCropRegions(tf.test.TestCase):
     ps_crop = ops.position_sensitive_crop_regions(
         image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=False)
     ps_crop_and_pool = tf.reduce_mean(
-        ps_crop, reduction_indices=(1, 2), keep_dims=True)
+        ps_crop, reduction_indices=(1, 2), keepdims=True)
 
     with self.test_session() as sess:
       output = sess.run(ps_crop_and_pool)
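
Note on the three changes above: the keep_dims argument of tf.reduce_mean was renamed to keepdims in newer TensorFlow releases and the old spelling is deprecated; the semantics (keeping the reduced dimensions with size 1) are unchanged.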
@@ -1225,6 +1225,21 @@ class MatmulGatherOnZerothAxis(test_case.TestCase):
     gather_output = self.execute(graph_fn, [params, indices])
     self.assertAllClose(gather_output, expected_output)
 
+  def test_gather_with_dynamic_shape_input(self):
+    params_placeholder = tf.placeholder(tf.float32, shape=[None, 4])
+    indices_placeholder = tf.placeholder(tf.int32, shape=[None])
+    gather_result = ops.matmul_gather_on_zeroth_axis(
+        params_placeholder, indices_placeholder)
+    params = np.array([[1, 2, 3, 4],
+                       [5, 6, 7, 8],
+                       [9, 10, 11, 12],
+                       [0, 1, 0, 0]], dtype=np.float32)
+    indices = np.array([0, 0, 0, 0, 0, 0])
+    expected_output = np.array(6*[[1, 2, 3, 4]])
+    with self.test_session() as sess:
+      gather_output = sess.run(gather_result, feed_dict={
+          params_placeholder: params, indices_placeholder: indices})
+      self.assertAllClose(gather_output, expected_output)
 
 if __name__ == '__main__':
   tf.test.main()
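
The new test covers exactly the case the previous matmul_gather_on_zeroth_axis implementation could not handle: placeholders whose leading dimension is None, where params.shape[0] is not known statically and the output shape has to be assembled from dynamic shapes.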
@@ -19,7 +19,7 @@ Testing with visualization in the following colab:
 https://drive.google.com/a/google.com/file/d/0B5HnKS_hMsNARERpU3MtU3I5RFE/view?usp=sharing
 """
+import logging
 import os
 
 import numpy as np
@@ -145,7 +145,7 @@ class VisualizationUtilsTest(tf.test.TestCase):
       for i in range(images_with_boxes_np.shape[0]):
        img_name = 'image_' + str(i) + '.png'
        output_file = os.path.join(self.get_temp_dir(), img_name)
-       print 'Writing output image %d to %s' % (i, output_file)
+       logging.info('Writing output image %d to %s', i, output_file)
        image_pil = Image.fromarray(images_with_boxes_np[i, ...])
        image_pil.save(output_file)
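
The Python 2 print statement above is replaced with logging.info (hence the import logging added earlier in the same file), which is valid under both Python 2 and 3 and sends the message to the test log rather than stdout.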