ModelZoo / ResNet50_tensorflow / Commits

Commit eb62b917, authored Feb 13, 2017 by Martin Wicke, committed by GitHub on Feb 13, 2017

Merge pull request #1010 from tensorflow/update-cifar10-summaries

Updated summaries in the tutorial models to 1.0

Parents: 2fd3dcf3, 337c66ed
Showing 5 changed files with 17 additions and 20 deletions:
tutorials/embedding/word2vec.py (+1, -1)
tutorials/image/cifar10/cifar10.py (+7, -7)
tutorials/image/cifar10/cifar10_input.py (+1, -1)
tutorials/image/cifar10/cifar10_multi_gpu_train.py (+5, -8)
tutorials/rnn/ptb/ptb_word_lm.py (+3, -3)
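Every hunk below applies the same mechanical rename: the pre-1.0 summary ops (tf.scalar_summary and the tf.contrib.deprecated.* aliases) become calls into the TF 1.0 tf.summary module. As orientation only, a minimal sketch of the rename pattern, using toy tensors that are not part of this commit:

    import tensorflow as tf  # TensorFlow 1.x

    # Toy tensors, for illustration only.
    loss = tf.constant(0.5, name='toy_loss')
    weights = tf.random_normal([10], name='toy_weights')

    # Pre-1.0 spellings (removed by this commit):
    #   tf.scalar_summary('loss', loss)
    #   tf.contrib.deprecated.histogram_summary('weights', weights)
    # TF 1.0 spellings (introduced by this commit):
    tf.summary.scalar('loss', loss)
    tf.summary.histogram('weights', weights)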
tutorials/embedding/word2vec.py

@@ -365,7 +365,7 @@ class Word2Vec(object):
       self._word2id[w] = i
     true_logits, sampled_logits = self.forward(examples, labels)
     loss = self.nce_loss(true_logits, sampled_logits)
-    tf.scalar_summary("NCE loss", loss)
+    tf.summary.scalar("NCE loss", loss)
     self._loss = loss
     self.optimize(loss)
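For context (not shown in this diff): ops created by tf.summary.scalar are registered in the default summaries collection, so the usual consumption pattern under TF 1.0 is merge, run, write. A minimal sketch, with a stand-in tensor and a hypothetical log directory:

    import tensorflow as tf  # TensorFlow 1.x

    loss = tf.constant(1.0)              # stand-in for the NCE loss tensor
    tf.summary.scalar("NCE loss", loss)  # same call the hunk introduces

    merged = tf.summary.merge_all()      # gathers every registered summary op
    with tf.Session() as sess:
      writer = tf.summary.FileWriter("/tmp/word2vec_logs", sess.graph)
      writer.add_summary(sess.run(merged), global_step=0)
      writer.close()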
tutorials/image/cifar10/cifar10.py

@@ -90,8 +90,8 @@ def _activation_summary(x):
   # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
   # session. This helps the clarity of presentation on tensorboard.
   tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
-  tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)
-  tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity',
+  tf.summary.histogram(tensor_name + '/activations', x)
+  tf.summary.scalar(tensor_name + '/sparsity',
                                        tf.nn.zero_fraction(x))

@@ -316,8 +316,8 @@ def _add_loss_summaries(total_loss):
   for l in losses + [total_loss]:
     # Name each loss as '(raw)' and name the moving average version of the loss
     # as the original loss name.
-    tf.contrib.deprecated.scalar_summary(l.op.name + ' (raw)', l)
-    tf.contrib.deprecated.scalar_summary(l.op.name, loss_averages.average(l))
+    tf.summary.scalar(l.op.name + ' (raw)', l)
+    tf.summary.scalar(l.op.name, loss_averages.average(l))

   return loss_averages_op

@@ -345,7 +345,7 @@ def train(total_loss, global_step):
                                   decay_steps,
                                   LEARNING_RATE_DECAY_FACTOR,
                                   staircase=True)
-  tf.contrib.deprecated.scalar_summary('learning_rate', lr)
+  tf.summary.scalar('learning_rate', lr)

   # Generate moving averages of all losses and associated summaries.
   loss_averages_op = _add_loss_summaries(total_loss)

@@ -360,12 +360,12 @@ def train(total_loss, global_step):
   # Add histograms for trainable variables.
   for var in tf.trainable_variables():
-    tf.contrib.deprecated.histogram_summary(var.op.name, var)
+    tf.summary.histogram(var.op.name, var)

   # Add histograms for gradients.
   for grad, var in grads:
     if grad is not None:
-      tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients', grad)
+      tf.summary.histogram(var.op.name + '/gradients', grad)

   # Track the moving averages of all trainable variables.
   variable_averages = tf.train.ExponentialMovingAverage(
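The _add_loss_summaries hunk keeps the tutorial's two-series convention: the raw loss is tagged '(raw)' while the smoothed series takes the original op name. A minimal sketch of that pattern under TF 1.x, using a stand-in loss tensor rather than the tutorial's real losses:

    import tensorflow as tf  # TensorFlow 1.x

    # Stand-in for the tutorial's total loss.
    total_loss = tf.reduce_mean(tf.random_normal([]), name='total_loss')

    # Shadow the loss with a 0.9-decay exponential moving average and export
    # both the raw and the smoothed series, as _add_loss_summaries does.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply([total_loss])

    tf.summary.scalar(total_loss.op.name + ' (raw)', total_loss)
    tf.summary.scalar(total_loss.op.name, loss_averages.average(total_loss))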
tutorials/image/cifar10/cifar10_input.py

@@ -132,7 +132,7 @@ def _generate_image_and_label_batch(image, label, min_queue_examples,
         capacity=min_queue_examples + 3 * batch_size)

   # Display the training images in the visualizer.
-  tf.contrib.deprecated.image_summary('images', images)
+  tf.summary.image('images', images)

   return images, tf.reshape(label_batch, [batch_size])
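Aside (not part of the commit): tf.summary.image expects a 4-D [batch, height, width, channels] tensor and, with its default max_outputs=3, records at most three images per step. A minimal sketch with a dummy batch:

    import tensorflow as tf  # TensorFlow 1.x

    # Dummy batch standing in for the shuffled training images.
    images = tf.zeros([16, 24, 24, 3])  # [batch, height, width, channels]
    tf.summary.image('images', images)  # records up to max_outputs=3 images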
tutorials/image/cifar10/cifar10_multi_gpu_train.py

@@ -93,7 +93,7 @@ def tower_loss(scope):
     # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
     # session. This helps the clarity of presentation on tensorboard.
     loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
-    tf.contrib.deprecated.scalar_summary(loss_name, l)
+    tf.summary.scalar(loss_name, l)

   return total_loss

@@ -187,22 +187,19 @@ def train():
     grads = average_gradients(tower_grads)

     # Add a summary to track the learning rate.
-    summaries.append(tf.contrib.deprecated.scalar_summary('learning_rate', lr))
+    summaries.append(tf.summary.scalar('learning_rate', lr))

     # Add histograms for gradients.
     for grad, var in grads:
       if grad is not None:
-        summaries.append(
-            tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients',
-                                                    grad))
+        summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))

     # Apply the gradients to adjust the shared variables.
     apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

     # Add histograms for trainable variables.
     for var in tf.trainable_variables():
-      summaries.append(
-          tf.contrib.deprecated.histogram_summary(var.op.name, var))
+      summaries.append(tf.summary.histogram(var.op.name, var))

     # Track the moving averages of all trainable variables.
     variable_averages = tf.train.ExponentialMovingAverage(

@@ -216,7 +213,7 @@ def train():
     saver = tf.train.Saver(tf.global_variables())

     # Build the summary operation from the last tower summaries.
-    summary_op = tf.contrib.deprecated.merge_summary(summaries)
+    summary_op = tf.summary.merge(summaries)

     # Build an initialization operation to run below.
     init = tf.global_variables_initializer()
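Unlike the single-GPU script, which sweeps the whole default collection, this script accumulates each tower's summary ops in an explicit Python list and merges only that list via tf.summary.merge. A minimal sketch of the list-based pattern, with toy towers in place of the real ones:

    import tensorflow as tf  # TensorFlow 1.x

    summaries = []
    for i in range(2):  # toy stand-in for the GPU towers
      with tf.name_scope('tower_%d' % i):
        loss = tf.constant(float(i), name='loss')
        summaries.append(tf.summary.scalar('loss_%d' % i, loss))

    # Merge only the ops collected above, not everything in the graph.
    summary_op = tf.summary.merge(summaries)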
tutorials/rnn/ptb/ptb_word_lm.py

@@ -334,14 +334,14 @@ def main(_):
     train_input = PTBInput(config=config, data=train_data, name="TrainInput")
     with tf.variable_scope("Model", reuse=None, initializer=initializer):
       m = PTBModel(is_training=True, config=config, input_=train_input)
-    tf.scalar_summary("Training Loss", m.cost)
-    tf.scalar_summary("Learning Rate", m.lr)
+    tf.summary.scalar("Training Loss", m.cost)
+    tf.summary.scalar("Learning Rate", m.lr)

   with tf.name_scope("Valid"):
     valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
     with tf.variable_scope("Model", reuse=True, initializer=initializer):
       mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
-    tf.scalar_summary("Validation Loss", mvalid.cost)
+    tf.summary.scalar("Validation Loss", mvalid.cost)

   with tf.name_scope("Test"):
     test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
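Context for the PTB hunk: the train and validation models share weights through variable_scope reuse, so each instance attaches its own scalar summaries. A minimal sketch of that sharing pattern, with a hypothetical build_model standing in for PTBModel:

    import tensorflow as tf  # TensorFlow 1.x

    def build_model(inputs):
      # Hypothetical stand-in for PTBModel: one shared weight vector.
      w = tf.get_variable('w', shape=[3], initializer=tf.zeros_initializer())
      return tf.reduce_sum(w * inputs, name='cost')

    with tf.variable_scope('Model', reuse=None):
      train_cost = build_model(tf.ones([3]))
      tf.summary.scalar('Training Loss', train_cost)

    with tf.variable_scope('Model', reuse=True):  # same weights, new ops
      valid_cost = build_model(tf.ones([3]) * 2.0)
      tf.summary.scalar('Validation Loss', valid_cost)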