ModelZoo / ResNet50_tensorflow

Commit 2479cd93, authored Aug 04, 2017 by Toby Boyd, committed by GitHub on Aug 04, 2017

Merge pull request #2131 from mari-linhares/patch-10

Fixing comments style

Parents: 424f8da6, 8c019479
Showing 1 changed file with 31 additions and 34 deletions.
tutorials/image/cifar10_estimator/cifar10_main.py

@@ -74,52 +74,48 @@ tf.flags.DEFINE_float('momentum', 0.9, 'Momentum for MomentumOptimizer.')
 tf.flags.DEFINE_float('weight_decay', 2e-4, 'Weight decay for convolutions.')

 tf.flags.DEFINE_float('learning_rate', 0.1,
-                      """This is the inital learning rate value.
-                      The learning rate will decrease during training.
-                      For more details check the model_fn implementation
-                      in this file.
-                      """)
+                      'This is the inital learning rate value.'
+                      ' The learning rate will decrease during training.'
+                      ' For more details check the model_fn implementation'
+                      ' in this file.')

 tf.flags.DEFINE_boolean('use_distortion_for_training', True,
                         'If doing image distortion for training.')

 tf.flags.DEFINE_boolean('run_experiment', False,
-                        """If True will run an experiment,
-                        otherwise will run training and evaluation
-                        using the estimator interface.
-                        Experiments perform training on several workers in
-                        parallel, in other words experiments know how to
-                        invoke train and eval in a sensible fashion for
-                        distributed training.
-                        """)
+                        'If True will run an experiment,'
+                        ' otherwise will run training and evaluation'
+                        ' using the estimator interface.'
+                        ' Experiments perform training on several workers in'
+                        ' parallel, in other words experiments know how to'
+                        ' invoke train and eval in a sensible fashion for'
+                        ' distributed training.')

 tf.flags.DEFINE_boolean('sync', False,
-                        """If true when running in a distributed environment
-                        will run on sync mode.
-                        """)
+                        'If true when running in a distributed environment'
+                        ' will run on sync mode.')

 tf.flags.DEFINE_integer('num_workers', 1, 'Number of workers.')

 # Perf flags
 tf.flags.DEFINE_integer('num_intra_threads', 1,
-                        """Number of threads to use for intra-op parallelism.
-                        If set to 0, the system will pick an appropriate number.
-                        The default is 1 since in this example CPU only handles
-                        the input pipeline and gradient aggregation (when
-                        --is_cpu_ps). Ops that could potentially benefit
-                        from intra-op parallelism are scheduled to run on GPUs.
-                        """)
+                        'Number of threads to use for intra-op parallelism.'
+                        ' If set to 0, the system will pick an appropriate number.'
+                        ' The default is 1 since in this example CPU only handles'
+                        ' the input pipeline and gradient aggregation (when'
+                        ' --is_cpu_ps). Ops that could potentially benefit'
+                        ' from intra-op parallelism are scheduled to run on GPUs.')

 tf.flags.DEFINE_integer('num_inter_threads', 0,
-                        """
-                        Number of threads to use for inter-op
-                        parallelism. If set to 0, the system will pick
-                        an appropriate number.
-                        """)
+                        'Number of threads to use for inter-op'
+                        ' parallelism. If set to 0, the system will pick'
+                        ' an appropriate number.')

 tf.flags.DEFINE_boolean('force_gpu_compatible', False,
-                        """whether to enable force_gpu_compatible in
-                        GPU_Options. Check
-                        tensorflow/core/protobuf/config.proto#L69
-                        for details.
-                        """)
+                        'Whether to enable force_gpu_compatible in'
+                        ' GPU_Options. Check'
+                        ' tensorflow/core/protobuf/config.proto#L69'
+                        ' for details.')

 # Debugging flags
 tf.flags.DEFINE_boolean('log_device_placement', False,
...
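The style change in this hunk relies on Python's implicit concatenation of adjacent string literals: the single-quoted fragments are joined at compile time into one continuous help string, whereas the old triple-quoted blocks carried their embedded newlines and leading indentation straight into the flag's --help text. A minimal standalone sketch of the difference (plain Python, no TensorFlow needed; the variable names are illustrative only):

old_style = """This is the inital learning rate value.
               The learning rate will decrease during training."""
new_style = ('This is the inital learning rate value.'
             ' The learning rate will decrease during training.')

print(repr(old_style))  # Keeps the newline and the padding spaces.
print(repr(new_style))  # One clean line of text.
print('\n' in old_style, '\n' in new_style)  # True False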
@@ -368,7 +364,7 @@ def _tower_fn(is_training, weight_decay, feature, label, tower_losses,
   """Build computation tower for each device (CPU or GPU).

   Args:
-    is_training: true if is for training graph.
+    is_training: true if is training graph.
     weight_decay: weight regularization strength, a float.
     feature: a Tensor.
     label: a Tensor.
@@ -452,17 +448,18 @@ def get_experiment_fn(train_input_fn, eval_input_fn, train_steps, eval_steps,
   """
   def _experiment_fn(run_config, hparams):
     """Returns an Experiment."""
-    del hparams # unused arg
-    # create estimator
+    del hparams # Unused arg.
+    # Create estimator.
     classifier = tf.estimator.Estimator(model_fn=_resnet_model_fn,
                                         config=run_config)
+    # Create experiment.
     experiment = tf.contrib.learn.Experiment(
         classifier,
         train_input_fn=train_input_fn,
         eval_input_fn=eval_input_fn,
         train_steps=train_steps,
         eval_steps=eval_steps)
-    # adding hooks to estimator on training mode
+    # Adding hooks to be used by the estimator on training mode.
     experiment.extend_train_hooks(train_hooks)
     return experiment
   return _experiment_fn
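As this hunk shows, _experiment_fn follows the (run_config, hparams) signature and wraps a core tf.estimator.Estimator in a tf.contrib.learn.Experiment. Below is a toy sketch of that wiring run end to end, assuming TF 1.x-era APIs (tf.contrib.learn has since been removed); the model_fn, input_fn, and /tmp model directory are placeholders invented for illustration, not code from this repository.

import tensorflow as tf

def _toy_input_fn():
  # Random features and all-zero labels, just to exercise the pipeline.
  features = tf.random_normal([8, 4])
  labels = tf.zeros([8], dtype=tf.int64)
  return features, labels

def _toy_model_fn(features, labels, mode):
  # A single dense layer standing in for the real ResNet model_fn.
  logits = tf.layers.dense(features, 2)
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
  train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
      loss, global_step=tf.train.get_global_step())
  return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

classifier = tf.estimator.Estimator(model_fn=_toy_model_fn,
                                    model_dir='/tmp/toy_experiment')
experiment = tf.contrib.learn.Experiment(
    classifier,
    train_input_fn=_toy_input_fn,
    eval_input_fn=_toy_input_fn,
    train_steps=10,
    eval_steps=1)
experiment.train_and_evaluate()

In the actual example, the Estimator is built from _resnet_model_fn and the CIFAR-10 input functions, exactly as the hunk above shows.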