Unverified commit 451906e4 authored by pkulzc, committed by GitHub

Release MobileDet code and model, and require tf_slim installation for OD API. (#8562)



* Merged commit includes the following changes:
311933687  by Sergio Guadarrama:

    Removes spurious use of tf.compat.v2, which resulted in spurious tf.compat.v1.compat.v2. Adds a basic test to nasnet_utils.
    Replaces all remaining "import tensorflow as tf" with "import tensorflow.compat.v1 as tf".
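    A minimal sketch of the resulting pattern (illustrative, not a file from this commit): the compat alias keeps every call site spelled as plain "tf.", which is exactly why stacking an explicit tf.compat.v1 prefix on top of it is redundant.

        import tensorflow.compat.v1 as tf

        # Call sites stay unchanged; `tf.` already resolves to the v1 API,
        # so writing tf.compat.v1.placeholder here would double the prefix.
        with tf.Graph().as_default():
          x = tf.placeholder(tf.float32, shape=[None, 4])
          y = tf.layers.dense(x, units=2)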

--
311766063  by Sergio Guadarrama:

    Removes explicit tf.compat.v1 at all call sites (we already import tf.compat.v1, so that code was effectively doing tf.compat.v1.compat.v1). The existing code worked in the latest TensorFlow releases, 2.2 (and 1.15), but not in 1.14 or 2.0.0a; this CL fixes it.

--
311624958  by Sergio Guadarrama:

    Updates a README that didn't render properly in GitHub documentation.

--
310980959  by Sergio Guadarrama:

    Moves research_models/slim off tf.contrib.slim/layers/framework to tf_slim
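    The migration is mechanical; a hedged before/after sketch (illustrative names, not code from this commit):

        # Before:
        #   from tensorflow.contrib import slim as contrib_slim
        #   slim = contrib_slim
        # After: tf_slim is a standalone package (pip install tf_slim).
        import tensorflow.compat.v1 as tf
        import tf_slim as slim

        def head(features):
          # arg_scope and the layer wrappers keep their contrib.slim signatures.
          with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu6):
            return slim.conv2d(features, 32, [3, 3], scope='head')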

--
310263156  by Sergio Guadarrama:

    Adds model breakdown for MobilenetV3

--
308640516  by Sergio Guadarrama:

    Internal change

308244396  by Sergio Guadarrama:

    GroupNormalization support for MobilenetV3.
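    Group normalization normalizes within channel groups rather than across the batch, so it behaves identically at any batch size. A minimal TF1-style sketch of the computation (an assumption for illustration, not the actual MobilenetV3 wiring; learnable scale/offset omitted):

        import tensorflow.compat.v1 as tf

        def group_norm(x, groups=32, eps=1e-5):
          """Normalizes an NHWC tensor within channel groups."""
          _, h, w, c = x.shape.as_list()
          g = min(groups, c)  # assumes c is divisible by g
          x = tf.reshape(x, [-1, h, w, g, c // g])
          mean, var = tf.nn.moments(x, axes=[1, 2, 4], keepdims=True)
          x = (x - mean) / tf.sqrt(var + eps)
          return tf.reshape(x, [-1, h, w, c])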

--
307475800  by Sergio Guadarrama:

    Internal change

--
302077708  by Sergio Guadarrama:

    Remove `disable_tf2` behavior from slim py_library targets

--
301208453  by Sergio Guadarrama:

    Automated refactoring to make code Python 3 compatible.

--
300816672  by Sergio Guadarrama:

    Internal change

299433840  by Sergio Guadarrama:

    Internal change

299221609  by Sergio Guadarrama:

    Explicitly disable TensorFlow v2 behaviors for all TF1.x binaries and tests

--
299179617  by Sergio Guadarrama:

    Internal change

299040784  by Sergio Guadarrama:

    Internal change

299036699  by Sergio Guadarrama:

    Internal change

298736510  by Sergio Guadarrama:

    Internal change

298732599  by Sergio Guadarrama:

    Internal change

298729507  by Sergio Guadarrama:

    Internal change

298253328  by Sergio Guadarrama:

    Internal change

297788346  by Sergio Guadarrama:

    Internal change

297785278  by Sergio Guadarrama:

    Internal change

297783127  by Sergio Guadarrama:

    Internal change

297725870  by Sergio Guadarrama:

    Internal change

297721811  by Sergio Guadarrama:

    Internal change

297711347  by Sergio Guadarrama:

    Internal change

297708059  by Sergio Guadarrama:

    Internal change

297701831  by Sergio Guadarrama:

    Internal change

297700038  by Sergio Guadarrama:

    Internal change

297670468  by Sergio Guadarrama:

    Internal change.

--
297350326  by Sergio Guadarrama:

    Explicitly replace "import tensorflow" with "tensorflow.compat.v1" for TF2.x migration

--
297201668  by Sergio Guadarrama:

    Explicitly replace "import tensorflow" with "tensorflow.compat.v1" for TF2.x migration

--
294483372  by Sergio Guadarrama:

    Internal change

PiperOrigin-RevId: 311933687

* Merged commit includes the following changes:
312578615  by Menglong Zhu:

    Modify the LSTM feature extractors to be Python 3 compatible.

--
311264357  by Menglong Zhu:

    Removes contrib.slim

--
308957207  by Menglong Zhu:

    Automated refactoring to make code Python 3 compatible.

--
306976470  by yongzhe:

    Internal change

306777559  by Menglong Zhu:

    Internal change

--
299232507  by lzyuan:

    Internal update.

--
299221735  by lzyuan:

    Add a small epsilon on max_range for quantize_op to prevent range collapse.
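    The failure mode, sketched with a hypothetical helper (the real change is internal): when every observed value is identical, min_range == max_range and the quantization scale (max - min) / (2**bits - 1) collapses to zero, so a tiny epsilon keeps the range non-empty.

        import tensorflow.compat.v1 as tf

        def safe_quant_range(min_range, max_range, eps=1e-6):
          """Keeps max_range strictly above min_range before quantizing."""
          return min_range, tf.maximum(max_range, min_range + eps)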

--

PiperOrigin-RevId: 312578615

* Merged commit includes the following changes:
310447280  by lzc:

    Internal changes.

--

PiperOrigin-RevId: 310447280
Co-authored-by: Sergio Guadarrama <sguada@google.com>
Co-authored-by: Menglong Zhu <menglong@google.com>
parent 73b5be67
@@ -25,7 +25,7 @@ import numpy as np
 import pandas as pd
 from pycocotools import mask as coco_mask
 import six
-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection.core import standard_fields
 from object_detection.metrics import oid_challenge_evaluation_utils as utils
......
@@ -20,7 +20,7 @@ from __future__ import print_function
 import numpy as np
 import pandas as pd
-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection.core import standard_fields
 from object_detection.metrics import oid_vrd_challenge_evaluation_utils as utils
 from object_detection.utils import vrd_evaluation
......
@@ -16,7 +16,7 @@
 import numpy as np
 import numpy.testing as np_testing
-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection.core import standard_fields as fields
 from object_detection.metrics import tf_example_parser
......
@@ -22,7 +22,9 @@ import copy
 import functools
 import os

-import tensorflow as tf
+import tensorflow.compat.v1 as tf
+import tf_slim as slim

 from object_detection import eval_util
 from object_detection import exporter as exporter_lib
@@ -40,11 +42,8 @@ from object_detection.utils import visualization_utils as vis_utils
 # pylint: disable=g-import-not-at-top
 try:
-  from tensorflow.contrib import framework as contrib_framework
-  from tensorflow.contrib import layers as contrib_layers
   from tensorflow.contrib import learn as contrib_learn
   from tensorflow.contrib import tpu as contrib_tpu
-  from tensorflow.contrib import training as contrib_training
 except ImportError:
   # TF 2.0 doesn't ship with contrib.
   pass
@@ -95,6 +94,10 @@ def _prepare_groundtruth_for_eval(detection_model, class_agnostic,
       of groundtruth boxes per image..
     'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32
       tensor of keypoints (if provided in groundtruth).
+    'groundtruth_group_of': [batch_size, num_boxes] bool tensor indicating
+      group_of annotations (if provided in groundtruth).
+    'groundtruth_labeled_classes': [batch_size, num_classes] int64
+      tensor of 1-indexed classes.
     class_agnostic: Boolean indicating whether detections are class agnostic.
   """
   input_data_fields = fields.InputDataFields()
@@ -138,6 +141,29 @@ def _prepare_groundtruth_for_eval(detection_model, class_agnostic,
         detection_model.groundtruth_lists(
             fields.BoxListFields.keypoint_visibilities))
+  if detection_model.groundtruth_has_field(fields.BoxListFields.group_of):
+    groundtruth[input_data_fields.groundtruth_group_of] = tf.stack(
+        detection_model.groundtruth_lists(fields.BoxListFields.group_of))
+  if detection_model.groundtruth_has_field(
+      fields.InputDataFields.groundtruth_labeled_classes):
+    labeled_classes_list = detection_model.groundtruth_lists(
+        fields.InputDataFields.groundtruth_labeled_classes)
+    labeled_classes = [
+        tf.where(x)[:, 0] + label_id_offset for x in labeled_classes_list
+    ]
+    if len(labeled_classes) > 1:
+      num_classes = labeled_classes_list[0].shape[0]
+      padded_labeled_classes = []
+      for x in labeled_classes:
+        padding = num_classes - tf.shape(x)[0]
+        padded_labeled_classes.append(tf.pad(x, [[0, padding]]))
+      groundtruth[input_data_fields.groundtruth_labeled_classes] = tf.stack(
+          padded_labeled_classes)
+    else:
+      groundtruth[input_data_fields.groundtruth_labeled_classes] = tf.stack(
+          labeled_classes)
   groundtruth[input_data_fields.num_groundtruth_boxes] = (
       tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]]))
   return groundtruth
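A note on the labeled-classes hunk above: tf.where turns each per-image k-hot vector into a variable-length list of 1-indexed class ids, which must be padded to a common length before tf.stack. A hedged standalone sketch of that conversion (not code from this commit):

    import tensorflow.compat.v1 as tf

    label_id_offset = 1
    # k-hot over 5 classes; classes 2 and 3 (1-indexed) are labeled.
    k_hot = tf.constant([0., 1., 1., 0., 0.])
    indices = tf.where(k_hot > 0)[:, 0] + label_id_offset  # -> [2, 3]
    # Pad with zeros up to num_classes so per-image lists stack into a batch.
    padded = tf.pad(indices, [[0, 5 - tf.shape(indices)[0]]])  # -> [2, 3, 0, 0, 0]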
@@ -213,6 +239,7 @@ def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):
           unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)
           unpadded_tensor_list.append(unpadded_tensor)
+        unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list
     unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)
   return unbatched_tensor_dict
@@ -252,6 +279,9 @@ def provide_groundtruth(model, labels):
   gt_is_crowd_list = None
   if fields.InputDataFields.groundtruth_is_crowd in labels:
     gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd]
+  gt_group_of_list = None
+  if fields.InputDataFields.groundtruth_group_of in labels:
+    gt_group_of_list = labels[fields.InputDataFields.groundtruth_group_of]
   gt_area_list = None
   if fields.InputDataFields.groundtruth_area in labels:
     gt_area_list = labels[fields.InputDataFields.groundtruth_area]
@@ -269,6 +299,7 @@ def provide_groundtruth(model, labels):
       groundtruth_keypoint_visibilities_list=gt_keypoint_visibilities_list,
       groundtruth_weights_list=gt_weights_list,
       groundtruth_is_crowd_list=gt_is_crowd_list,
+      groundtruth_group_of_list=gt_group_of_list,
       groundtruth_area_list=gt_area_list)
@@ -447,7 +478,7 @@ def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False,
       exclude_variables = (
           train_config.freeze_variables
           if train_config.freeze_variables else None)
-      trainable_variables = contrib_framework.filter_variables(
+      trainable_variables = slim.filter_variables(
           tf.trainable_variables(),
          include_patterns=include_variables,
          exclude_patterns=exclude_variables)
@@ -462,7 +493,7 @@ def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False,
       summaries = [] if use_tpu else None
       if train_config.summarize_gradients:
         summaries = ['gradients', 'gradient_norm', 'global_gradient_norm']
-      train_op = contrib_layers.optimize_loss(
+      train_op = slim.optimizers.optimize_loss(
           loss=total_loss,
           global_step=global_step,
           learning_rate=None,
@@ -826,7 +857,48 @@ def create_train_and_eval_specs(train_input_fn,
   return train_spec, eval_specs


-def continuous_eval(estimator, model_dir, input_fn, train_steps, name):
+def _evaluate_checkpoint(estimator,
+                         input_fn,
+                         checkpoint_path,
+                         name,
+                         max_retries=0):
+  """Evaluates a checkpoint.
+
+  Args:
+    estimator: Estimator object to use for evaluation.
+    input_fn: Input function to use for evaluation.
+    checkpoint_path: Path of the checkpoint to evaluate.
+    name: Namescope for eval summary.
+    max_retries: Maximum number of times to retry the evaluation on
+      encountering a tf.errors.InvalidArgumentError. If negative, will always
+      retry the evaluation.
+
+  Returns:
+    Estimator evaluation results.
+  """
+  always_retry = True if max_retries < 0 else False
+  retries = 0
+  while always_retry or retries <= max_retries:
+    try:
+      return estimator.evaluate(
+          input_fn=input_fn,
+          steps=None,
+          checkpoint_path=checkpoint_path,
+          name=name)
+    except tf.errors.InvalidArgumentError as e:
+      if always_retry or retries < max_retries:
+        tf.logging.info('Retrying checkpoint evaluation after exception: %s',
+                        e)
+        retries += 1
+      else:
+        raise e
+
+
+def continuous_eval(estimator,
+                    model_dir,
+                    input_fn,
+                    train_steps,
+                    name,
+                    max_retries=0):
   """Perform continuous evaluation on checkpoints written to a model directory.

   Args:
@@ -836,20 +908,27 @@ def continuous_eval(estimator, model_dir, input_fn, train_steps, name):
     train_steps: Number of training steps. This is used to infer the last
       checkpoint and stop evaluation loop.
     name: Namescope for eval summary.
+    max_retries: Maximum number of times to retry the evaluation on
+      encountering a tf.errors.InvalidArgumentError. If negative, will always
+      retry the evaluation.
   """

   def terminate_eval():
     tf.logging.info('Terminating eval after 180 seconds of no checkpoints')
     return True

-  for ckpt in contrib_training.checkpoints_iterator(
+  for ckpt in tf.train.checkpoints_iterator(
       model_dir, min_interval_secs=180, timeout=None,
       timeout_fn=terminate_eval):

     tf.logging.info('Starting Evaluation.')
     try:
-      eval_results = estimator.evaluate(
-          input_fn=input_fn, steps=None, checkpoint_path=ckpt, name=name)
+      eval_results = _evaluate_checkpoint(
+          estimator=estimator,
+          input_fn=input_fn,
+          checkpoint_path=ckpt,
+          name=name,
+          max_retries=max_retries)

     tf.logging.info('Eval results: %s' % eval_results)

     # Terminate eval job when final checkpoint is reached
......
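The retry contract introduced in the hunk above: max_retries=0 evaluates exactly once, a positive value allows that many additional attempts, and a negative value retries indefinitely. The same pattern as a self-contained sketch (run_eval is a hypothetical callable, not the OD API):

    import tensorflow.compat.v1 as tf

    def eval_with_retries(run_eval, max_retries=0):
      """Bounded retry loop mirroring _evaluate_checkpoint."""
      always_retry = max_retries < 0
      retries = 0
      while always_retry or retries <= max_retries:
        try:
          return run_eval()
        except tf.errors.InvalidArgumentError as e:
          if always_retry or retries < max_retries:
            tf.logging.info('Retrying after exception: %s', e)
            retries += 1
          else:
            raise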
@@ -22,7 +22,7 @@ import functools
 import os

 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from tensorflow.contrib.tpu.python.tpu import tpu_config
 from tensorflow.contrib.tpu.python.tpu import tpu_estimator
@@ -42,11 +42,18 @@ MODEL_NAME_FOR_TEST = 'ssd_inception_v2_pets'

 # Model for testing keypoints.
 MODEL_NAME_FOR_KEYPOINTS_TEST = 'ssd_mobilenet_v1_fpp'

+# Model for testing tfSequenceExample inputs.
+MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST = 'context_rcnn_camera_trap'
+

-def _get_data_path():
+def _get_data_path(model_name):
   """Returns an absolute path to TFRecord file."""
-  return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data',
-                      'pets_examples.record')
+  if model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST:
+    return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data',
+                        'snapshot_serengeti_sequence_examples.record')
+  else:
+    return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data',
+                        'pets_examples.record')


 def get_pipeline_config_path(model_name):
@@ -54,6 +61,9 @@ def get_pipeline_config_path(model_name):
   if model_name == MODEL_NAME_FOR_KEYPOINTS_TEST:
     return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data',
                         model_name + '.config')
+  elif model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST:
+    return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data',
+                        model_name + '.config')
   else:
     return os.path.join(tf.resource_loader.get_data_files_path(), 'samples',
                         'configs', model_name + '.config')
@@ -71,12 +81,20 @@ def _get_keypoints_labelmap_path():
                       'face_person_with_keypoints_label_map.pbtxt')


+def _get_sequence_example_labelmap_path():
+  """Returns an absolute path to label map file."""
+  return os.path.join(tf.resource_loader.get_data_files_path(), 'data',
+                      'snapshot_serengeti_label_map.pbtxt')
+
+
 def _get_configs_for_model(model_name):
   """Returns configurations for model."""
   filename = get_pipeline_config_path(model_name)
-  data_path = _get_data_path()
+  data_path = _get_data_path(model_name)
   if model_name == MODEL_NAME_FOR_KEYPOINTS_TEST:
     label_map_path = _get_keypoints_labelmap_path()
+  elif model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST:
+    label_map_path = _get_sequence_example_labelmap_path()
   else:
     label_map_path = _get_labelmap_path()
   configs = config_util.get_configs_from_pipeline_file(filename)
@@ -99,7 +117,7 @@ def _make_initializable_iterator(dataset):
   Returns:
     A `tf.data.Iterator`.
   """
-  iterator = dataset.make_initializable_iterator()
+  iterator = tf.data.make_initializable_iterator(dataset)
   tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
   return iterator
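The iterator hunk above swaps the deprecated Dataset method for the module-level helper, which still resolves under the compat.v1 alias. A minimal usage sketch (assumes TF1 graph mode, i.e. v2 behavior disabled):

    import tensorflow.compat.v1 as tf

    dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3]).batch(2)
    # Old: dataset.make_initializable_iterator()
    iterator = tf.data.make_initializable_iterator(dataset)
    with tf.Session() as sess:
      sess.run(iterator.initializer)
      print(sess.run(iterator.get_next()))  # [1 2]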
@@ -199,6 +217,11 @@ class ModelLibTest(tf.test.TestCase):
     configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
     self._assert_model_fn_for_train_eval(configs, 'train')

+  def test_model_fn_in_train_mode_sequences(self):
+    """Tests the model function in TRAIN mode."""
+    configs = _get_configs_for_model(MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST)
+    self._assert_model_fn_for_train_eval(configs, 'train')
+
   def test_model_fn_in_train_mode_freeze_all_variables(self):
     """Tests model_fn TRAIN mode with all variables frozen."""
     configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
@@ -229,6 +252,11 @@ class ModelLibTest(tf.test.TestCase):
     configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
     self._assert_model_fn_for_train_eval(configs, 'eval')

+  def test_model_fn_in_eval_mode_sequences(self):
+    """Tests the model function in EVAL mode."""
+    configs = _get_configs_for_model(MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST)
+    self._assert_model_fn_for_train_eval(configs, 'eval')
+
   def test_model_fn_in_keypoints_eval_mode(self):
     """Tests the model function in EVAL mode with keypoints config."""
     configs = _get_configs_for_model(MODEL_NAME_FOR_KEYPOINTS_TEST)
@@ -270,6 +298,27 @@ class ModelLibTest(tf.test.TestCase):
     self.assertIn('eval_input_fns', train_and_eval_dict)
     self.assertIn('eval_on_train_input_fn', train_and_eval_dict)

+  def test_create_estimator_and_inputs_sequence_example(self):
+    """Tests that Estimator and input function are constructed correctly."""
+    run_config = tf.estimator.RunConfig()
+    hparams = model_hparams.create_hparams(
+        hparams_overrides='load_pretrained=false')
+    pipeline_config_path = get_pipeline_config_path(
+        MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST)
+    train_steps = 20
+    train_and_eval_dict = model_lib.create_estimator_and_inputs(
+        run_config,
+        hparams,
+        pipeline_config_path,
+        train_steps=train_steps)
+    estimator = train_and_eval_dict['estimator']
+    train_steps = train_and_eval_dict['train_steps']
+    self.assertIsInstance(estimator, tf.estimator.Estimator)
+    self.assertEqual(20, train_steps)
+    self.assertIn('train_input_fn', train_and_eval_dict)
+    self.assertIn('eval_input_fns', train_and_eval_dict)
+    self.assertIn('eval_on_train_input_fn', train_and_eval_dict)
+
   def test_create_estimator_with_default_train_eval_steps(self):
     """Tests that number of train/eval defaults to config values."""
     run_config = tf.estimator.RunConfig()
......
@@ -22,7 +22,7 @@ import copy
 import os
 import time

-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection import eval_util
 from object_detection import inputs
@@ -101,6 +101,10 @@ def _compute_losses_and_predictions_dicts(
         instance masks for objects.
       labels[fields.InputDataFields.groundtruth_keypoints] is a
         float32 tensor containing keypoints for each box.
+      labels[fields.InputDataFields.groundtruth_group_of] is a tf.bool tensor
+        containing group_of annotations.
+      labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32
+        k-hot tensor of classes.
     add_regularization_loss: Whether or not to include the model's
       regularization loss in the losses dictionary.
@@ -199,6 +203,8 @@ def eager_train_step(detection_model,
       labels[fields.InputDataFields.groundtruth_keypoints] is a
         [batch_size, num_boxes, num_keypoints, 2] float32 tensor containing
         keypoints for each box.
+      labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32
+        k-hot tensor of classes.
     unpad_groundtruth_tensors: A parameter passed to unstack_batch.
     optimizer: The training optimizer that will update the variables.
     learning_rate: The learning rate tensor for the current training step.
......
@@ -23,7 +23,7 @@ import tempfile
 import numpy as np
 import six
-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection import inputs
 from object_detection import model_hparams
......
@@ -20,7 +20,7 @@ from __future__ import print_function
 from absl import flags
-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection import model_hparams
 from object_detection import model_lib
@@ -53,6 +53,11 @@ flags.DEFINE_boolean(
     'run_once', False, 'If running in eval-only mode, whether to run just '
     'one round of eval vs running continuously (default).'
 )
+flags.DEFINE_integer(
+    'max_eval_retries', 0, 'If running continuous eval, the maximum number of '
+    'retries upon encountering tf.errors.InvalidArgumentError. If negative, '
+    'will always retry the evaluation.'
+)
 FLAGS = flags.FLAGS
@@ -91,7 +96,7 @@ def main(unused_argv):
                                              FLAGS.checkpoint_dir))
     else:
       model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir, input_fn,
-                                train_steps, name)
+                                train_steps, name, FLAGS.max_eval_retries)
   else:
     train_spec, eval_specs = model_lib.create_train_and_eval_specs(
         train_input_fn,
......
@@ -23,7 +23,7 @@ from __future__ import division
 from __future__ import print_function

 from absl import flags
-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection import model_hparams
@@ -85,6 +85,11 @@ flags.DEFINE_string(
                     'where event and checkpoint files will be written.')
 flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
                     'file.')
+flags.DEFINE_integer(
+    'max_eval_retries', 0, 'If running continuous eval, the maximum number of '
+    'retries upon encountering tf.errors.InvalidArgumentError. If negative, '
+    'will always retry the evaluation.'
+)
 FLAGS = tf.flags.FLAGS
@@ -142,7 +147,7 @@ def main(unused_argv):
     # Currently only a single eval input is allowed.
     input_fn = eval_input_fns[0]
     model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn, train_steps,
-                              name)
+                              name, FLAGS.max_eval_retries)


 if __name__ == '__main__':
......
@@ -16,8 +16,8 @@

 """Embedded-friendly SSDFeatureExtractor for MobilenetV1 features."""

-import tensorflow as tf
-from tensorflow.contrib import slim as contrib_slim
+import tensorflow.compat.v1 as tf
+import tf_slim as slim

 from object_detection.meta_architectures import ssd_meta_arch
 from object_detection.models import feature_map_generators
@@ -25,8 +25,6 @@ from object_detection.utils import context_manager
 from object_detection.utils import ops
 from nets import mobilenet_v1

-slim = contrib_slim
-

 class EmbeddedSSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
   """Embedded-friendly SSD Feature Extractor using MobilenetV1 features.
......
@@ -15,7 +15,7 @@
 """Tests for embedded_ssd_mobilenet_v1_feature_extractor."""

 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection.models import embedded_ssd_mobilenet_v1_feature_extractor
 from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_feature_extractor_test
......
@@ -22,15 +22,13 @@ as well as
 Huang et al. (https://arxiv.org/abs/1611.10012)
 """

-import tensorflow as tf
-from tensorflow.contrib import slim as contrib_slim
+import tensorflow.compat.v1 as tf
+import tf_slim as slim

 from object_detection.meta_architectures import faster_rcnn_meta_arch
 from object_detection.utils import variables_helper
 from nets import inception_resnet_v2

-slim = contrib_slim
-

 class FasterRCNNInceptionResnetV2FeatureExtractor(
     faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
......
@@ -15,7 +15,7 @@
 """Tests for models.faster_rcnn_inception_resnet_v2_feature_extractor."""

-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
......
@@ -25,7 +25,7 @@ Huang et al. (https://arxiv.org/abs/1611.10012)
 # Skip pylint for this file because it times out
 # pylint: skip-file

-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection.meta_architectures import faster_rcnn_meta_arch
 from object_detection.models.keras_models import inception_resnet_v2
......
@@ -15,7 +15,7 @@
 """Tests for models.faster_rcnn_inception_resnet_v2_keras_feature_extractor."""

-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res
......
@@ -18,14 +18,12 @@
 See "Rethinking the Inception Architecture for Computer Vision"
 https://arxiv.org/abs/1512.00567
 """

-import tensorflow as tf
-from tensorflow.contrib import slim as contrib_slim
+import tensorflow.compat.v1 as tf
+import tf_slim as slim

 from object_detection.meta_architectures import faster_rcnn_meta_arch
 from nets import inception_v2

-slim = contrib_slim
-

 def _batch_norm_arg_scope(list_ops,
                           use_batch_norm=True,
......
@@ -16,7 +16,7 @@
 """Tests for faster_rcnn_inception_v2_feature_extractor."""

 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection.models import faster_rcnn_inception_v2_feature_extractor as faster_rcnn_inception_v2
......
@@ -16,15 +16,13 @@
 """Mobilenet v1 Faster R-CNN implementation."""

 import numpy as np
-import tensorflow as tf
-from tensorflow.contrib import slim as contrib_slim
+import tensorflow.compat.v1 as tf
+import tf_slim as slim

 from object_detection.meta_architectures import faster_rcnn_meta_arch
 from object_detection.utils import shape_utils
 from nets import mobilenet_v1

-slim = contrib_slim
-

 def _get_mobilenet_conv_no_last_stride_defs(conv_depth_ratio_in_percentage):
   if conv_depth_ratio_in_percentage not in [25, 50, 75, 100]:
......
@@ -16,7 +16,7 @@
 """Tests for faster_rcnn_mobilenet_v1_feature_extractor."""

 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 from object_detection.models import faster_rcnn_mobilenet_v1_feature_extractor as faster_rcnn_mobilenet_v1
......
@@ -26,17 +26,15 @@ from __future__ import division
 from __future__ import print_function

 from six.moves import range
-import tensorflow as tf
-from tensorflow.contrib import framework as contrib_framework
-from tensorflow.contrib import slim as contrib_slim
+import tensorflow.compat.v1 as tf
+import tf_slim as slim

 from object_detection.meta_architectures import faster_rcnn_meta_arch
 from object_detection.utils import variables_helper
 from nets.nasnet import nasnet
 from nets.nasnet import nasnet_utils

-arg_scope = contrib_framework.arg_scope
-slim = contrib_slim
+arg_scope = slim.arg_scope


 def nasnet_large_arg_scope_for_detection(is_batch_norm_training=False):
......