Commit 90cc9baa authored by Zhichao Lu, committed by pkulzc

Migrating away from Experiment class, as it is now deprecated. Also, refactoring into a separate model library and binaries.

PiperOrigin-RevId: 192004845
parent 8deba73f
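At a glance, the migration replaces the deprecated `tf.contrib.learn.Experiment`/`learn_runner` flow with `tf.estimator.train_and_evaluate`. A minimal sketch of the new flow, condensed from the model_main.py added in this commit (the model directory and pipeline config path below are placeholder values):

import tensorflow as tf

from object_detection import model_hparams
from object_detection import model_lib

config = tf.estimator.RunConfig(model_dir='/tmp/model_dir')  # placeholder
train_and_eval_dict = model_lib.create_estimator_and_inputs(
    run_config=config,
    hparams=model_hparams.create_hparams(None),
    pipeline_config_path='/tmp/pipeline.config',  # placeholder
    train_steps=1000,
    eval_steps=100)
train_spec, eval_specs = model_lib.create_train_and_eval_specs(
    train_and_eval_dict['train_input_fn'],
    train_and_eval_dict['eval_input_fn'],
    train_and_eval_dict['predict_input_fn'],
    train_and_eval_dict['train_steps'],
    train_and_eval_dict['eval_steps'])
# train_and_evaluate supersedes Experiment; only one EvalSpec is used for now.
tf.estimator.train_and_evaluate(
    train_and_eval_dict['estimator'], train_spec, eval_specs[0])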
@@ -12,30 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-r"""Creates and runs `Experiment` for object detection model.
-
-This uses the TF.learn framework to define and run an object detection model
-wrapped in an `Estimator`.
-
-Note that this module is only compatible with SSD Meta architecture at the
-moment.
-"""
+r"""Constructs model, inputs, and training environment."""
 
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
 import functools
-import os
 
 import tensorflow as tf
 
-from google.protobuf import text_format
-from tensorflow.contrib.learn.python.learn import learn_runner
+from tensorflow.contrib.tpu.python.tpu import tpu_estimator
 from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
-from tensorflow.python.lib.io import file_io
 
 from object_detection import eval_util
 from object_detection import inputs
-from object_detection import model_hparams
 from object_detection.builders import model_builder
 from object_detection.builders import optimizer_builder
 from object_detection.core import standard_fields as fields
@@ -45,15 +35,6 @@ from object_detection.utils import shape_utils
 from object_detection.utils import variables_helper
 from object_detection.utils import visualization_utils as vis_utils
 
-tf.flags.DEFINE_string('model_dir', None, 'Path to output model directory '
-                       'where event and checkpoint files will be written.')
-tf.flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
-                       'file.')
-tf.flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
-tf.flags.DEFINE_integer('num_eval_steps', None, 'Number of train steps.')
-FLAGS = tf.flags.FLAGS
-
 # A map of names to methods that help build the model.
 MODEL_BUILD_UTIL_MAP = {
     'get_configs_from_pipeline_file':
@@ -406,33 +387,18 @@ def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False):
   return model_fn
 
 
-def build_experiment_fn(train_steps, eval_steps):
-  """Returns a function that creates an `Experiment`."""
-
-  def build_experiment(run_config, hparams):
-    """Builds an `Experiment` from configuration and hyperparameters.
-
-    Args:
-      run_config: A `RunConfig`.
-      hparams: A `HParams`.
-
-    Returns:
-      An `Experiment` object.
-    """
-    return populate_experiment(run_config, hparams, FLAGS.pipeline_config_path,
-                               train_steps, eval_steps)
-
-  return build_experiment
-
-
-def populate_experiment(run_config,
-                        hparams,
-                        pipeline_config_path,
-                        train_steps=None,
-                        eval_steps=None,
-                        model_fn_creator=create_model_fn,
-                        **kwargs):
-  """Populates an `Experiment` object.
+def create_estimator_and_inputs(run_config,
+                                hparams,
+                                pipeline_config_path,
+                                train_steps=None,
+                                eval_steps=None,
+                                model_fn_creator=create_model_fn,
+                                use_tpu_estimator=False,
+                                use_tpu=False,
+                                num_shards=1,
+                                params=None,
+                                **kwargs):
+  """Creates `Estimator`, input functions, and steps.
 
   Args:
     run_config: A `RunConfig`.
@@ -452,18 +418,33 @@ def populate_experiment(run_config,
       * Returns:
         `model_fn` for `Estimator`.
+    use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False,
+      an `Estimator` will be returned.
+    use_tpu: Boolean, whether training and evaluation should run on TPU. Only
+      used if `use_tpu_estimator` is True.
+    num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`
+      is True.
+    params: Parameter dictionary passed from the estimator. Only used if
+      `use_tpu_estimator` is True.
     **kwargs: Additional keyword arguments for configuration override.
 
   Returns:
-    An `Experiment` that defines all aspects of training, evaluation, and
-    export.
+    A dictionary with the following fields:
+    'estimator': An `Estimator` or `TPUEstimator`.
+    'train_input_fn': A training input function.
+    'eval_input_fn': An evaluation input function.
+    'predict_input_fn': A prediction input function.
+    'train_steps': Number of training steps. Either directly from input or from
+      configuration.
+    'eval_steps': Number of evaluation steps. Either directly from input or from
+      configuration.
   """
   get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
       'get_configs_from_pipeline_file']
-  create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
-      'create_pipeline_proto_from_configs']
   merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
       'merge_external_params_with_configs']
+  create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
+      'create_pipeline_proto_from_configs']
   create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']
   create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']
   create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']
@@ -481,16 +462,16 @@ def populate_experiment(run_config,
   eval_config = configs['eval_config']
   eval_input_config = configs['eval_input_config']
 
-  if train_steps is None and train_config.num_steps:
-    train_steps = train_config.num_steps
+  if train_steps is None:
+    train_steps = configs['train_config'].num_steps
 
-  if eval_steps is None and eval_config.num_examples:
-    eval_steps = eval_config.num_examples
+  if eval_steps is None:
+    eval_steps = configs['eval_config'].num_examples
 
   detection_model_fn = functools.partial(
       model_builder.build, model_config=model_config)
 
-  # Create the input functions for TRAIN/EVAL.
+  # Create the input functions for TRAIN/EVAL/PREDICT.
   train_input_fn = create_train_input_fn(
       train_config=train_config,
       train_input_config=train_input_config,
@@ -499,51 +480,149 @@ def populate_experiment(run_config,
       eval_config=eval_config,
       eval_input_config=eval_input_config,
       model_config=model_config)
+  predict_input_fn = create_predict_input_fn(model_config=model_config)
 
-  export_strategies = [
-      tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(
-          serving_input_fn=create_predict_input_fn(
-              model_config=model_config))
-  ]
-
-  estimator = tf.estimator.Estimator(
-      model_fn=model_fn_creator(detection_model_fn, configs, hparams),
-      config=run_config)
+  model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu)
+  if use_tpu_estimator:
+    estimator = tpu_estimator.TPUEstimator(
+        model_fn=model_fn,
+        train_batch_size=train_config.batch_size,
+        # For each core, only batch size 1 is supported for eval.
+        eval_batch_size=num_shards * 1 if use_tpu else 1,
+        use_tpu=use_tpu,
+        config=run_config,
+        params=params if params else {})
+  else:
+    estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
 
+  # Write the as-run pipeline config to disk.
   if run_config.is_chief:
-    # Store the final pipeline config for traceability.
     pipeline_config_final = create_pipeline_proto_from_configs(
         configs)
-    if not file_io.file_exists(estimator.model_dir):
-      file_io.recursive_create_dir(estimator.model_dir)
-    pipeline_config_final_path = os.path.join(estimator.model_dir,
-                                              'pipeline.config')
-    config_text = text_format.MessageToString(pipeline_config_final)
-    with tf.gfile.Open(pipeline_config_final_path, 'wb') as f:
-      tf.logging.info('Writing as-run pipeline config file to %s',
-                      pipeline_config_final_path)
-      f.write(config_text)
+    config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)
 
-  return tf.contrib.learn.Experiment(
+  return dict(
       estimator=estimator,
       train_input_fn=train_input_fn,
       eval_input_fn=eval_input_fn,
+      predict_input_fn=predict_input_fn,
       train_steps=train_steps,
-      eval_steps=eval_steps,
-      export_strategies=export_strategies,
-      eval_delay_secs=120,)
+      eval_steps=eval_steps)
+
+
+def create_train_and_eval_specs(train_input_fn,
+                                eval_input_fn,
+                                predict_input_fn,
+                                train_steps,
+                                eval_steps,
+                                eval_on_train_data=False,
+                                final_exporter_name='Servo',
+                                eval_spec_name='eval'):
+  """Creates a `TrainSpec` and `EvalSpec`s.
+
+  Args:
+    train_input_fn: Function that produces features and labels on train data.
+    eval_input_fn: Function that produces features and labels on eval data.
+    predict_input_fn: Function that produces features for inference.
+    train_steps: Number of training steps.
+    eval_steps: Number of eval steps.
+    eval_on_train_data: Whether to evaluate model on training data. Default is
+      False.
+    final_exporter_name: String name given to `FinalExporter`.
+    eval_spec_name: String name given to main `EvalSpec`.
+
+  Returns:
+    Tuple of `TrainSpec` and list of `EvalSpec`s. The first `EvalSpec` is for
+    evaluation data. If `eval_on_train_data` is True, the second `EvalSpec` in
+    the list will correspond to training data.
+  """
+  exporter = tf.estimator.FinalExporter(
+      name=final_exporter_name, serving_input_receiver_fn=predict_input_fn)
+
+  train_spec = tf.estimator.TrainSpec(
+      input_fn=train_input_fn, max_steps=train_steps)
+
+  eval_specs = [
+      tf.estimator.EvalSpec(
+          name=eval_spec_name,
+          input_fn=eval_input_fn,
+          steps=eval_steps,
+          exporters=exporter)
+  ]
+
+  if eval_on_train_data:
+    eval_specs.append(
+        tf.estimator.EvalSpec(
+            name='eval_on_train', input_fn=train_input_fn, steps=eval_steps))
+
+  return train_spec, eval_specs
 
 
-def main(unused_argv):
-  tf.flags.mark_flag_as_required('model_dir')
-  tf.flags.mark_flag_as_required('pipeline_config_path')
-  config = tf.contrib.learn.RunConfig(model_dir=FLAGS.model_dir)
-  learn_runner.run(
-      experiment_fn=build_experiment_fn(FLAGS.num_train_steps,
-                                        FLAGS.num_eval_steps),
-      run_config=config,
-      hparams=model_hparams.create_hparams())
-
-
-if __name__ == '__main__':
-  tf.app.run()
+def populate_experiment(run_config,
+                        hparams,
+                        pipeline_config_path,
+                        train_steps=None,
+                        eval_steps=None,
+                        model_fn_creator=create_model_fn,
+                        **kwargs):
+  """Populates an `Experiment` object.
+
+  EXPERIMENT CLASS IS DEPRECATED. Please switch to
+  tf.estimator.train_and_evaluate. As an example, see model_main.py.
+
+  Args:
+    run_config: A `RunConfig`.
+    hparams: A `HParams`.
+    pipeline_config_path: A path to a pipeline config file.
+    train_steps: Number of training steps. If None, the number of training steps
+      is set from the `TrainConfig` proto.
+    eval_steps: Number of evaluation steps per evaluation cycle. If None, the
+      number of evaluation steps is set from the `EvalConfig` proto.
+    model_fn_creator: A function that creates a `model_fn` for `Estimator`.
+      Follows the signature:
+      * Args:
+        * `detection_model_fn`: Function that returns `DetectionModel` instance.
+        * `configs`: Dictionary of pipeline config objects.
+        * `hparams`: `HParams` object.
+      * Returns:
+        `model_fn` for `Estimator`.
+    **kwargs: Additional keyword arguments for configuration override.
+
+  Returns:
+    An `Experiment` that defines all aspects of training, evaluation, and
+    export.
+  """
+  tf.logging.warning('Experiment is being deprecated. Please use '
+                     'tf.estimator.train_and_evaluate(). See model_main.py for '
+                     'an example.')
+  train_and_eval_dict = create_estimator_and_inputs(
+      run_config,
+      hparams,
+      pipeline_config_path,
+      train_steps=train_steps,
+      eval_steps=eval_steps,
+      model_fn_creator=model_fn_creator,
+      **kwargs)
+  estimator = train_and_eval_dict['estimator']
+  train_input_fn = train_and_eval_dict['train_input_fn']
+  eval_input_fn = train_and_eval_dict['eval_input_fn']
+  predict_input_fn = train_and_eval_dict['predict_input_fn']
+  train_steps = train_and_eval_dict['train_steps']
+  eval_steps = train_and_eval_dict['eval_steps']
+
+  export_strategies = [
+      tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(
+          serving_input_fn=predict_input_fn)
+  ]
+
+  return tf.contrib.learn.Experiment(
+      estimator=estimator,
+      train_input_fn=train_input_fn,
+      eval_input_fn=eval_input_fn,
+      train_steps=train_steps,
+      eval_steps=eval_steps,
+      export_strategies=export_strategies,
+      eval_delay_secs=120,)
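With `eval_on_train_data=True`, `create_train_and_eval_specs` returns two `EvalSpec`s: one for held-out evaluation data (with the `FinalExporter` attached) and a second named 'eval_on_train' that re-uses the training input function. A short sketch of the expected shape, mirroring the new test in model_lib_test.py below (the input functions and step counts are assumed to come from `create_estimator_and_inputs`):

train_spec, eval_specs = model_lib.create_train_and_eval_specs(
    train_input_fn,
    eval_input_fn,
    predict_input_fn,
    train_steps=20,
    eval_steps=10,
    eval_on_train_data=True,
    final_exporter_name='exporter',
    eval_spec_name='holdout')
assert train_spec.max_steps == 20
assert len(eval_specs) == 2
assert eval_specs[0].name == 'holdout'        # held-out data, exports 'exporter'
assert eval_specs[1].name == 'eval_on_train'  # training data, no exporter attached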
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Tests for object detection model."""
+"""Tests for object detection model library."""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -24,16 +24,20 @@ import os
 
 import numpy as np
 import tensorflow as tf
 
+from tensorflow.contrib.tpu.python.tpu import tpu_config
+from tensorflow.contrib.tpu.python.tpu import tpu_estimator
+
 from object_detection import inputs
-from object_detection import model
 from object_detection import model_hparams
-from object_detection import model_test_util
+from object_detection import model_lib
 from object_detection.builders import model_builder
 from object_detection.core import standard_fields as fields
 from object_detection.utils import config_util
 
-MODEL_NAME_FOR_TEST = model_test_util.SSD_INCEPTION_MODEL_NAME
+# Model for test. Options are:
+# 'ssd_inception_v2_pets', 'faster_rcnn_resnet50_pets'
+MODEL_NAME_FOR_TEST = 'ssd_inception_v2_pets'
 
 
 def _get_data_path():
@@ -42,6 +46,12 @@ def _get_data_path():
                       'pets_examples.record')
 
 
+def get_pipeline_config_path(model_name):
+  """Returns path to the local pipeline config file."""
+  return os.path.join(tf.resource_loader.get_data_files_path(), 'samples',
+                      'configs', model_name + '.config')
+
+
 def _get_labelmap_path():
   """Returns an absolute path to label map file."""
   return os.path.join(tf.resource_loader.get_data_files_path(), 'data',
@@ -50,7 +60,7 @@ def _get_labelmap_path():
 
 def _get_configs_for_model(model_name):
   """Returns configurations for model."""
-  filename = model_test_util.GetPipelineConfigPath(model_name)
+  filename = get_pipeline_config_path(model_name)
   data_path = _get_data_path()
   label_map_path = _get_labelmap_path()
   configs = config_util.get_configs_from_pipeline_file(filename)
@@ -62,17 +72,14 @@ def _get_configs_for_model(model_name):
   return configs
 
 
-def setUpModule():
-  model_test_util.InitializeFlags(MODEL_NAME_FOR_TEST)
-
-
-class ModelTflearnTest(tf.test.TestCase):
+class ModelLibTest(tf.test.TestCase):
 
   @classmethod
   def setUpClass(cls):
     tf.reset_default_graph()
 
-  def _assert_outputs_for_train_eval(self, configs, mode, class_agnostic=False):
+  def _assert_model_fn_for_train_eval(self, configs, mode,
+                                      class_agnostic=False):
     model_config = configs['model']
     train_config = configs['train_config']
     with tf.Graph().as_default():
@@ -95,7 +102,7 @@ class ModelTflearnTest(tf.test.TestCase):
       hparams = model_hparams.create_hparams(
           hparams_overrides='load_pretrained=false')
 
-      model_fn = model.create_model_fn(detection_model_fn, configs, hparams)
+      model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams)
       estimator_spec = model_fn(features, labels, mode)
 
       self.assertIsNotNone(estimator_spec.loss)
@@ -118,7 +125,7 @@ class ModelTflearnTest(tf.test.TestCase):
         self.assertIsNotNone(estimator_spec.train_op)
       return estimator_spec
 
-  def _assert_outputs_for_predict(self, configs):
+  def _assert_model_fn_for_predict(self, configs):
     model_config = configs['model']
 
     with tf.Graph().as_default():
@@ -132,7 +139,7 @@ class ModelTflearnTest(tf.test.TestCase):
       hparams = model_hparams.create_hparams(
          hparams_overrides='load_pretrained=false')
 
-      model_fn = model.create_model_fn(detection_model_fn, configs, hparams)
+      model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams)
       estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT)
 
       self.assertIsNone(estimator_spec.loss)
@@ -142,27 +149,137 @@ class ModelTflearnTest(tf.test.TestCase):
       self.assertIn(tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
                     estimator_spec.export_outputs)
 
-  def testModelFnInTrainMode(self):
+  def test_model_fn_in_train_mode(self):
     """Tests the model function in TRAIN mode."""
     configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
-    self._assert_outputs_for_train_eval(configs, tf.estimator.ModeKeys.TRAIN)
+    self._assert_model_fn_for_train_eval(configs, tf.estimator.ModeKeys.TRAIN)
 
-  def testModelFnInEvalMode(self):
+  def test_model_fn_in_eval_mode(self):
     """Tests the model function in EVAL mode."""
     configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
-    self._assert_outputs_for_train_eval(configs, tf.estimator.ModeKeys.EVAL)
+    self._assert_model_fn_for_train_eval(configs, tf.estimator.ModeKeys.EVAL)
 
-  def testModelFnInPredictMode(self):
+  def test_model_fn_in_predict_mode(self):
     """Tests the model function in PREDICT mode."""
     configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
-    self._assert_outputs_for_predict(configs)
+    self._assert_model_fn_for_predict(configs)
 
-  def testExperiment(self):
-    """Tests that the `Experiment` object is constructed correctly."""
-    experiment = model_test_util.BuildExperiment()
-    model_dir = experiment.estimator.model_dir
-    pipeline_config_path = os.path.join(model_dir, 'pipeline.config')
-    self.assertTrue(tf.gfile.Exists(pipeline_config_path))
+  def test_create_estimator_and_inputs(self):
+    """Tests that `Estimator` and input functions are constructed correctly."""
+    run_config = tf.estimator.RunConfig()
+    hparams = model_hparams.create_hparams(
+        hparams_overrides='load_pretrained=false')
+    pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
+    train_steps = 20
+    eval_steps = 10
+    train_and_eval_dict = model_lib.create_estimator_and_inputs(
+        run_config,
+        hparams,
+        pipeline_config_path,
+        train_steps=train_steps,
+        eval_steps=eval_steps)
+    estimator = train_and_eval_dict['estimator']
+    train_steps = train_and_eval_dict['train_steps']
+    eval_steps = train_and_eval_dict['eval_steps']
+    self.assertIsInstance(estimator, tf.estimator.Estimator)
+    self.assertEqual(20, train_steps)
+    self.assertEqual(10, eval_steps)
+
+  def test_create_estimator_with_default_train_eval_steps(self):
+    """Tests that the number of train/eval steps defaults to config values."""
+    run_config = tf.estimator.RunConfig()
+    hparams = model_hparams.create_hparams(
+        hparams_overrides='load_pretrained=false')
+    pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
+    configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
+    config_train_steps = configs['train_config'].num_steps
+    config_eval_steps = configs['eval_config'].num_examples
+    train_and_eval_dict = model_lib.create_estimator_and_inputs(
+        run_config, hparams, pipeline_config_path)
+    estimator = train_and_eval_dict['estimator']
+    train_steps = train_and_eval_dict['train_steps']
+    eval_steps = train_and_eval_dict['eval_steps']
+    self.assertIsInstance(estimator, tf.estimator.Estimator)
+    self.assertEqual(config_train_steps, train_steps)
+    self.assertEqual(config_eval_steps, eval_steps)
+
+  def test_create_tpu_estimator_and_inputs(self):
+    """Tests that a `TPUEstimator` and inputs are constructed correctly."""
+    run_config = tpu_config.RunConfig()
+    hparams = model_hparams.create_hparams(
+        hparams_overrides='load_pretrained=false')
+    pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
+    train_steps = 20
+    eval_steps = 10
+    train_and_eval_dict = model_lib.create_estimator_and_inputs(
+        run_config,
+        hparams,
+        pipeline_config_path,
+        train_steps=train_steps,
+        eval_steps=eval_steps,
+        use_tpu_estimator=True)
+    estimator = train_and_eval_dict['estimator']
+    train_steps = train_and_eval_dict['train_steps']
+    eval_steps = train_and_eval_dict['eval_steps']
+    self.assertIsInstance(estimator, tpu_estimator.TPUEstimator)
+    self.assertEqual(20, train_steps)
+    self.assertEqual(10, eval_steps)
+
+  def test_create_train_and_eval_specs(self):
+    """Tests that `TrainSpec` and `EvalSpec`s are created correctly."""
+    run_config = tf.estimator.RunConfig()
+    hparams = model_hparams.create_hparams(
+        hparams_overrides='load_pretrained=false')
+    pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
+    train_steps = 20
+    eval_steps = 10
+    train_and_eval_dict = model_lib.create_estimator_and_inputs(
+        run_config,
+        hparams,
+        pipeline_config_path,
+        train_steps=train_steps,
+        eval_steps=eval_steps)
+    train_input_fn = train_and_eval_dict['train_input_fn']
+    eval_input_fn = train_and_eval_dict['eval_input_fn']
+    predict_input_fn = train_and_eval_dict['predict_input_fn']
+    train_steps = train_and_eval_dict['train_steps']
+    eval_steps = train_and_eval_dict['eval_steps']
+    train_spec, eval_specs = model_lib.create_train_and_eval_specs(
+        train_input_fn,
+        eval_input_fn,
+        predict_input_fn,
+        train_steps,
+        eval_steps,
+        eval_on_train_data=True,
+        final_exporter_name='exporter',
+        eval_spec_name='holdout')
+    self.assertEqual(train_steps, train_spec.max_steps)
+    self.assertEqual(2, len(eval_specs))
+    self.assertEqual(eval_steps, eval_specs[0].steps)
+    self.assertEqual('holdout', eval_specs[0].name)
+    self.assertEqual('exporter', eval_specs[0].exporters[0].name)
+    self.assertEqual(eval_steps, eval_specs[1].steps)
+    self.assertEqual('eval_on_train', eval_specs[1].name)
+
+  def test_experiment(self):
+    """Tests that the `Experiment` object is constructed correctly."""
+    run_config = tf.estimator.RunConfig()
+    hparams = model_hparams.create_hparams(
+        hparams_overrides='load_pretrained=false')
+    pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
+    experiment = model_lib.populate_experiment(
+        run_config,
+        hparams,
+        pipeline_config_path,
+        train_steps=10,
+        eval_steps=20)
+    self.assertEqual(10, experiment.train_steps)
+    self.assertEqual(20, experiment.eval_steps)
 
 
 class UnbatchTensorsTest(tf.test.TestCase):
@@ -184,7 +301,7 @@ class UnbatchTensorsTest(tf.test.TestCase):
         fields.InputDataFields.groundtruth_weights:
             groundtruth_weights_placeholder
     }
-    unbatched_tensor_dict = model.unstack_batch(
+    unbatched_tensor_dict = model_lib.unstack_batch(
        tensor_dict, unpad_groundtruth_tensors=False)
 
     with self.test_session() as sess:
@@ -231,7 +348,7 @@ class UnbatchTensorsTest(tf.test.TestCase):
         fields.InputDataFields.num_groundtruth_boxes:
             num_groundtruth_placeholder
     }
-    unbatched_tensor_dict = model.unstack_batch(
+    unbatched_tensor_dict = model_lib.unstack_batch(
        tensor_dict, unpad_groundtruth_tensors=True)
 
     with self.test_session() as sess:
      unbatched_tensor_dict_out = sess.run(
......
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary to run train and evaluation on object detection model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from object_detection import model_hparams
from object_detection import model_lib
flags.DEFINE_string(
    'model_dir', None, 'Path to output model directory '
    'where event and checkpoint files will be written.')
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
                    'file.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_integer('num_eval_steps', None, 'Number of eval steps.')
flags.DEFINE_string(
    'hparams_overrides', None, 'Hyperparameter overrides, '
    'represented as a string containing comma-separated '
    'hparam_name=value pairs.')
FLAGS = flags.FLAGS
def main(unused_argv):
  flags.mark_flag_as_required('model_dir')
  flags.mark_flag_as_required('pipeline_config_path')
  config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)
  train_and_eval_dict = model_lib.create_estimator_and_inputs(
      run_config=config,
      hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
      pipeline_config_path=FLAGS.pipeline_config_path,
      train_steps=FLAGS.num_train_steps,
      eval_steps=FLAGS.num_eval_steps)
  estimator = train_and_eval_dict['estimator']
  train_input_fn = train_and_eval_dict['train_input_fn']
  eval_input_fn = train_and_eval_dict['eval_input_fn']
  predict_input_fn = train_and_eval_dict['predict_input_fn']
  train_steps = train_and_eval_dict['train_steps']
  eval_steps = train_and_eval_dict['eval_steps']

  train_spec, eval_specs = model_lib.create_train_and_eval_specs(
      train_input_fn,
      eval_input_fn,
      predict_input_fn,
      train_steps,
      eval_steps,
      eval_on_train_data=False)

  # Currently only a single Eval Spec is allowed.
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])


if __name__ == '__main__':
  tf.app.run()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utils for tests for object detection tflearn model."""
from __future__ import absolute_import
import os
import tempfile
import tensorflow as tf
from object_detection import model
from object_detection import model_hparams
FLAGS = tf.flags.FLAGS
FASTER_RCNN_MODEL_NAME = 'faster_rcnn_resnet50_pets'
SSD_INCEPTION_MODEL_NAME = 'ssd_inception_v2_pets'
def GetPipelineConfigPath(model_name):
  """Returns path to the local pipeline config file."""
  return os.path.join(tf.resource_loader.get_data_files_path(), 'samples',
                      'configs', model_name + '.config')


def InitializeFlags(model_name_for_test):
  FLAGS.model_dir = tempfile.mkdtemp()
  FLAGS.pipeline_config_path = GetPipelineConfigPath(model_name_for_test)


def BuildExperiment():
  """Builds an Experiment object for testing purposes."""
  run_config = tf.contrib.learn.RunConfig()
  hparams = model_hparams.create_hparams(
      hparams_overrides='load_pretrained=false')
  # pylint: disable=protected-access
  experiment_fn = model.build_experiment_fn(10, 10)
  # pylint: enable=protected-access
  return experiment_fn(run_config, hparams)
@@ -22,177 +22,76 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import functools
 import os
+
+from absl import flags
 import tensorflow as tf
 
 from tensorflow.contrib.tpu.python.tpu import tpu_config
-from tensorflow.contrib.tpu.python.tpu import tpu_estimator
 from tensorflow.contrib.training.python.training import evaluation
 
-from object_detection import inputs
-from object_detection import model
 from object_detection import model_hparams
-from object_detection.builders import model_builder
-from object_detection.utils import config_util
+from object_detection import model_lib
 
 tf.flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than plain CPUs')
 
 # Cloud TPU Cluster Resolvers
-tf.flags.DEFINE_string(
+flags.DEFINE_string(
     'gcp_project',
     default=None,
     help='Project name for the Cloud TPU-enabled project. If not specified, we '
     'will attempt to automatically detect the GCE project from metadata.')
-tf.flags.DEFINE_string(
+flags.DEFINE_string(
     'tpu_zone',
     default=None,
     help='GCE zone where the Cloud TPU is located in. If not specified, we '
     'will attempt to automatically detect the GCE project from metadata.')
-tf.flags.DEFINE_string(
+flags.DEFINE_string(
     'tpu_name',
     default=None,
     help='Name of the Cloud TPU for Cluster Resolvers. You must specify either '
     'this flag or --master.')
-tf.flags.DEFINE_string(
-    'master', default=None,
+flags.DEFINE_string(
+    'master',
+    default=None,
     help='GRPC URL of the master (e.g. grpc://ip.address.of.tpu:8470). You '
     'must specify either this flag or --tpu_name.')
 
-tf.flags.DEFINE_integer('num_shards', 8, 'Number of shards (TPU cores).')
-tf.flags.DEFINE_integer('iterations_per_loop', 100,
+flags.DEFINE_integer('num_shards', 8, 'Number of shards (TPU cores).')
+flags.DEFINE_integer('iterations_per_loop', 100,
                      'Number of iterations per TPU training loop.')
 # For mode=train_and_eval, evaluation occurs after training is finished.
 # Note: independently of steps_per_checkpoint, estimator will save the most
 # recent checkpoint every 10 minutes by default for train_and_eval
-tf.flags.DEFINE_string('mode', 'train_and_eval',
+flags.DEFINE_string('mode', 'train_and_eval',
                     'Mode to run: train, eval, train_and_eval')
-tf.flags.DEFINE_integer('train_batch_size', 32 * 8, 'Batch size for training.')
+flags.DEFINE_integer('train_batch_size', 32 * 8, 'Batch size for training.')
 
 # For EVAL.
-tf.flags.DEFINE_integer('min_eval_interval_secs', 180,
+flags.DEFINE_integer('min_eval_interval_secs', 180,
                      'Minimum seconds between evaluations.')
-tf.flags.DEFINE_integer(
+flags.DEFINE_integer(
     'eval_timeout_secs', None,
     'Maximum seconds between checkpoints before evaluation terminates.')
-tf.flags.DEFINE_string('hparams_overrides', None, 'Comma-separated list of '
+flags.DEFINE_string(
+    'hparams_overrides', None, 'Comma-separated list of '
     'hyperparameters to override defaults.')
-tf.flags.DEFINE_boolean('eval_training_data', False,
+flags.DEFINE_boolean('eval_training_data', False,
                      'If training data should be evaluated for this job.')
+flags.DEFINE_string(
+    'model_dir', None, 'Path to output model directory '
+    'where event and checkpoint files will be written.')
+flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
+                    'file.')
+flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
+flags.DEFINE_integer('num_eval_steps', None, 'Number of eval steps.')
 
 FLAGS = tf.flags.FLAGS
-def create_estimator(run_config,
-                     hparams,
-                     pipeline_config_path,
-                     train_steps=None,
-                     eval_steps=None,
-                     train_batch_size=None,
-                     model_fn_creator=model.create_model_fn,
-                     use_tpu=False,
-                     num_shards=1,
-                     params=None,
-                     **kwargs):
-  """Creates an `Estimator` object.
-
-  Args:
-    run_config: A `RunConfig`.
-    hparams: A `HParams`.
-    pipeline_config_path: A path to a pipeline config file.
-    train_steps: Number of training steps. If None, the number of training steps
-      is set from the `TrainConfig` proto.
-    eval_steps: Number of evaluation steps per evaluation cycle. If None, the
-      number of evaluation steps is set from the `EvalConfig` proto.
-    train_batch_size: Training batch size. If none, use batch size from
-      `TrainConfig` proto.
-    model_fn_creator: A function that creates a `model_fn` for `Estimator`.
-      Follows the signature:
-      * Args:
-        * `detection_model_fn`: Function that returns `DetectionModel` instance.
-        * `configs`: Dictionary of pipeline config objects.
-        * `hparams`: `HParams` object.
-      * Returns:
-        `model_fn` for `Estimator`.
-    use_tpu: Boolean, whether training and evaluation should run on TPU.
-    num_shards: Number of shards (TPU cores).
-    params: Parameter dictionary passed from the estimator.
-    **kwargs: Additional keyword arguments for configuration override.
-
-  Returns:
-    Estimator: A estimator object used for training and evaluation
-    train_input_fn: Input function for the training loop
-    eval_validation_input_fn: Input function to run for evaluation on
-      validation data.
-    eval_training_input_fn: Input function to run for evaluation on
-      training data.
-    train_steps: Number of training steps either from arg `train_steps` or
-      `TrainConfig` proto
-    eval_steps: Number of evaluation steps either from arg `eval_steps` or
-      `EvalConfig` proto
-  """
-  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
-  configs = config_util.merge_external_params_with_configs(
-      configs,
-      hparams,
-      train_steps=train_steps,
-      eval_steps=eval_steps,
-      batch_size=train_batch_size,
-      **kwargs)
-  model_config = configs['model']
-  train_config = configs['train_config']
-  train_input_config = configs['train_input_config']
-  eval_config = configs['eval_config']
-  eval_input_config = configs['eval_input_config']
-  if FLAGS.eval_training_data:
-    eval_input_config = configs['train_input_config']
-
-  if params is None:
-    params = {}
-
-  if train_steps is None and train_config.num_steps:
-    train_steps = train_config.num_steps
-
-  if eval_steps is None and eval_config.num_examples:
-    eval_steps = eval_config.num_examples
-
-  detection_model_fn = functools.partial(
-      model_builder.build, model_config=model_config)
-
-  # Create the input functions for TRAIN/EVAL.
-  train_input_fn = inputs.create_train_input_fn(
-      train_config=train_config,
-      train_input_config=train_input_config,
-      model_config=model_config)
-  eval_validation_input_fn = inputs.create_eval_input_fn(
-      eval_config=eval_config,
-      eval_input_config=eval_input_config,
-      model_config=model_config)
-  eval_training_input_fn = inputs.create_eval_input_fn(
-      eval_config=eval_config,
-      eval_input_config=train_input_config,
-      model_config=model_config)
-
-  estimator = tpu_estimator.TPUEstimator(
-      model_fn=model_fn_creator(detection_model_fn, configs, hparams,
-                                use_tpu),
-      train_batch_size=train_config.batch_size,
-      # For each core, only batch size 1 is supported for eval.
-      eval_batch_size=num_shards * 1 if use_tpu else 1,
-      use_tpu=use_tpu,
-      config=run_config,
-      params=params)
-  return (estimator, train_input_fn, eval_validation_input_fn,
-          eval_training_input_fn, train_steps, eval_steps)
 def main(unused_argv):
-  tf.flags.mark_flag_as_required('model_dir')
-  tf.flags.mark_flag_as_required('pipeline_config_path')
+  flags.mark_flag_as_required('model_dir')
+  flags.mark_flag_as_required('pipeline_config_path')
 
   if FLAGS.master is None and FLAGS.tpu_name is None:
     raise RuntimeError('You must specify either --master or --tpu_name.')
@@ -217,28 +116,29 @@ def main(unused_argv):
       tpu_config=tpu_config.TPUConfig(
           iterations_per_loop=FLAGS.iterations_per_loop,
           num_shards=FLAGS.num_shards))
 
-  params = {}
-  (estimator, train_input_fn, eval_validation_input_fn, eval_training_input_fn,
-   train_steps, eval_steps) = (
-       create_estimator(
-           config,
-           model_hparams.create_hparams(
-               hparams_overrides=FLAGS.hparams_overrides),
-           FLAGS.pipeline_config_path,
-           train_steps=FLAGS.num_train_steps,
-           eval_steps=FLAGS.num_eval_steps,
-           train_batch_size=FLAGS.train_batch_size,
-           use_tpu=FLAGS.use_tpu,
-           num_shards=FLAGS.num_shards,
-           params=params))
+  train_and_eval_dict = model_lib.create_estimator_and_inputs(
+      run_config=config,
+      hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
+      pipeline_config_path=FLAGS.pipeline_config_path,
+      train_steps=FLAGS.num_train_steps,
+      eval_steps=FLAGS.num_eval_steps,
+      use_tpu_estimator=True,
+      use_tpu=FLAGS.use_tpu,
+      num_shards=FLAGS.num_shards,
+      batch_size=FLAGS.train_batch_size)
+  estimator = train_and_eval_dict['estimator']
+  train_input_fn = train_and_eval_dict['train_input_fn']
+  eval_input_fn = train_and_eval_dict['eval_input_fn']
+  train_steps = train_and_eval_dict['train_steps']
+  eval_steps = train_and_eval_dict['eval_steps']
 
   if FLAGS.mode in ['train', 'train_and_eval']:
     estimator.train(input_fn=train_input_fn, max_steps=train_steps)
 
   if FLAGS.mode == 'train_and_eval':
     # Eval one time.
-    eval_results = estimator.evaluate(
-        input_fn=eval_validation_input_fn, steps=eval_steps)
+    eval_results = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
     tf.logging.info('Eval results: %s' % eval_results)
 
   # Continuously evaluating.
@@ -258,10 +158,10 @@ def main(unused_argv):
     tf.logging.info('Starting to evaluate.')
     if FLAGS.eval_training_data:
       name = 'training_data'
-      input_fn = eval_training_input_fn
+      input_fn = train_input_fn
     else:
       name = 'validation_data'
-      input_fn = eval_validation_input_fn
+      input_fn = eval_input_fn
     try:
       eval_results = estimator.evaluate(
           input_fn=input_fn,
......
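The collapsed tail above continuously re-evaluates as new checkpoints appear. The elided body is not visible in this view; the sketch below is one plausible shape, based only on the `evaluation` module imported from tf.contrib.training and the `min_eval_interval_secs`/`eval_timeout_secs` flags, not the author's exact code:

# Hypothetical reconstruction of the elided continuous-eval loop.
for checkpoint_path in evaluation.checkpoints_iterator(
    FLAGS.model_dir,
    min_interval_secs=FLAGS.min_eval_interval_secs,
    timeout=FLAGS.eval_timeout_secs):
  eval_results = estimator.evaluate(
      input_fn=input_fn,
      steps=eval_steps,
      checkpoint_path=checkpoint_path,
      name=name)
  tf.logging.info('Eval results: %s' % eval_results)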