Commit 4c9ce20e authored by Marianne Linhares Monteiro's avatar Marianne Linhares Monteiro Committed by GitHub
Browse files

Small fixes on comments.

parent 0839edea
...@@ -74,52 +74,48 @@ tf.flags.DEFINE_float('momentum', 0.9, 'Momentum for MomentumOptimizer.') ...@@ -74,52 +74,48 @@ tf.flags.DEFINE_float('momentum', 0.9, 'Momentum for MomentumOptimizer.')
tf.flags.DEFINE_float('weight_decay', 2e-4, 'Weight decay for convolutions.') tf.flags.DEFINE_float('weight_decay', 2e-4, 'Weight decay for convolutions.')
tf.flags.DEFINE_float('learning_rate', 0.1, tf.flags.DEFINE_float('learning_rate', 0.1,
"""This is the initial learning rate value. 'This is the initial learning rate value.'
The learning rate will decrease during training. ' The learning rate will decrease during training.'
For more details check the model_fn implementation ' For more details check the model_fn implementation'
in this file. ' in this file.')
""")
tf.flags.DEFINE_boolean('use_distortion_for_training', True, tf.flags.DEFINE_boolean('use_distortion_for_training', True,
'If doing image distortion for training.') 'If doing image distortion for training.')
tf.flags.DEFINE_boolean('run_experiment', False, tf.flags.DEFINE_boolean('run_experiment', False,
"""If True will run an experiment, 'If True will run an experiment,'
otherwise will run training and evaluation ' otherwise will run training and evaluation'
using the estimator interface. ' using the estimator interface.'
Experiments perform training on several workers in ' Experiments perform training on several workers in'
parallel, in other words experiments know how to ' parallel, in other words experiments know how to'
invoke train and eval in a sensible fashion for ' invoke train and eval in a sensible fashion for'
distributed training. ' distributed training.')
""")
tf.flags.DEFINE_boolean('sync', False, tf.flags.DEFINE_boolean('sync', False,
"""If true when running in a distributed environment 'If true when running in a distributed environment'
will run on sync mode. ' will run on sync mode.')
""")
tf.flags.DEFINE_integer('num_workers', 1, 'Number of workers.') tf.flags.DEFINE_integer('num_workers', 1, 'Number of workers.')
# Perf flags # Perf flags
tf.flags.DEFINE_integer('num_intra_threads', 1, tf.flags.DEFINE_integer('num_intra_threads', 1,
"""Number of threads to use for intra-op parallelism. 'Number of threads to use for intra-op parallelism.'
If set to 0, the system will pick an appropriate number. ' If set to 0, the system will pick an appropriate number.'
The default is 1 since in this example CPU only handles ' The default is 1 since in this example CPU only handles'
the input pipeline and gradient aggregation (when ' the input pipeline and gradient aggregation (when'
--is_cpu_ps). Ops that could potentially benefit ' --is_cpu_ps). Ops that could potentially benefit'
from intra-op parallelism are scheduled to run on GPUs. ' from intra-op parallelism are scheduled to run on GPUs.')
""")
tf.flags.DEFINE_integer('num_inter_threads', 0, tf.flags.DEFINE_integer('num_inter_threads', 0,
"""Number of threads to use for inter-op 'Number of threads to use for inter-op'
parallelism. If set to 0, the system will pick ' parallelism. If set to 0, the system will pick'
an appropriate number.""") ' an appropriate number.')
tf.flags.DEFINE_boolean('force_gpu_compatible', False, tf.flags.DEFINE_boolean('force_gpu_compatible', False,
"""whether to enable force_gpu_compatible in 'Whether to enable force_gpu_compatible in'
GPU_Options. Check ' GPU_Options. Check'
tensorflow/core/protobuf/config.proto#L69 ' tensorflow/core/protobuf/config.proto#L69'
for details.""") ' for details.')
# Debugging flags # Debugging flags
tf.flags.DEFINE_boolean('log_device_placement', False, tf.flags.DEFINE_boolean('log_device_placement', False,
...@@ -368,7 +364,7 @@ def _tower_fn(is_training, weight_decay, feature, label, tower_losses, ...@@ -368,7 +364,7 @@ def _tower_fn(is_training, weight_decay, feature, label, tower_losses,
"""Build computation tower for each device (CPU or GPU). """Build computation tower for each device (CPU or GPU).
Args: Args:
is_training: true if is for training graph. is_training: true if is training graph.
weight_decay: weight regularization strength, a float. weight_decay: weight regularization strength, a float.
feature: a Tensor. feature: a Tensor.
label: a Tensor. label: a Tensor.
...@@ -452,17 +448,18 @@ def get_experiment_fn(train_input_fn, eval_input_fn, train_steps, eval_steps, ...@@ -452,17 +448,18 @@ def get_experiment_fn(train_input_fn, eval_input_fn, train_steps, eval_steps,
""" """
def _experiment_fn(run_config, hparams): def _experiment_fn(run_config, hparams):
"""Returns an Experiment.""" """Returns an Experiment."""
del hparams # unused arg del hparams # Unused arg.
# create estimator # Create estimator.
classifier = tf.estimator.Estimator(model_fn=_resnet_model_fn, classifier = tf.estimator.Estimator(model_fn=_resnet_model_fn,
config=run_config) config=run_config)
# Create experiment.
experiment = tf.contrib.learn.Experiment( experiment = tf.contrib.learn.Experiment(
classifier, classifier,
train_input_fn=train_input_fn, train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn, eval_input_fn=eval_input_fn,
train_steps=train_steps, train_steps=train_steps,
eval_steps=eval_steps) eval_steps=eval_steps)
# adding hooks to estimator on training mode # Adding hooks to be used by the estimator on training mode.
experiment.extend_train_hooks(train_hooks) experiment.extend_train_hooks(train_hooks)
return experiment return experiment
return _experiment_fn return _experiment_fn
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment