Commit 09e0cdcc

Authored Jan 09, 2017 by Neal Wu; committed by GitHub on Jan 09, 2017.

Merge pull request #873 from tensorflow/add-arguments

Updated calls to '..._cross_entropy_with_logits' to add arguments

Parents: fc1c9b1e, cc1fb668
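This commit tracks the TensorFlow 1.0 change that made the arguments of the `*_cross_entropy_with_logits` functions keyword-only, with `labels` listed before `logits`. A minimal sketch of the before/after calling convention, assuming the TF 1.x API these tutorials target (tensor values are illustrative):

    import tensorflow as tf  # TF 1.x, as used by these tutorials

    logits = tf.constant([[2.0, -1.0, 0.5]])  # illustrative values
    labels = tf.ones_like(logits)

    # Pre-1.0 positional form, which this commit removes:
    #   xent = tf.nn.sigmoid_cross_entropy_with_logits(logits, labels)
    # Keyword form required from TF 1.0 onward:
    xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)

    with tf.Session() as sess:
        print(sess.run(xent))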
Showing 2 changed files, with 3 additions and 3 deletions:

tutorials/embedding/word2vec.py     +2 -2
tutorials/image/cifar10/cifar10.py  +1 -1
tutorials/embedding/word2vec.py

@@ -263,9 +263,9 @@ class Word2Vec(object):
     # cross-entropy(logits, labels)
     opts = self._options
     true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
-        true_logits, tf.ones_like(true_logits))
+        labels=tf.ones_like(true_logits), logits=true_logits)
     sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
-        sampled_logits, tf.zeros_like(sampled_logits))
+        labels=tf.zeros_like(sampled_logits), logits=sampled_logits)

     # NCE-loss is the sum of the true and noise (sampled words)
     # contributions, averaged over the batch.
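For context, a self-contained sketch of how the two terms in this hunk feed the NCE loss mentioned in the trailing comment. The standalone function name and explicit batch_size parameter are assumptions for illustration; in the file this code lives inside the Word2Vec class and reads the batch size from opts:

    import tensorflow as tf

    def nce_loss_sketch(true_logits, sampled_logits, batch_size):
        # True examples are pushed toward label 1, sampled (noise) examples
        # toward label 0, using the keyword form this commit adopts.
        true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(true_logits), logits=true_logits)
        sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
        # NCE loss: sum of true and noise contributions, averaged over the batch.
        return (tf.reduce_sum(true_xent) +
                tf.reduce_sum(sampled_xent)) / batch_size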
tutorials/image/cifar10/cifar10.py

@@ -286,7 +286,7 @@ def loss(logits, labels):
   # Calculate the average cross entropy loss across the batch.
   labels = tf.cast(labels, tf.int64)
   cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      logits=logits, labels=labels, name='cross_entropy_per_example')
+      labels=labels, logits=logits, name='cross_entropy_per_example')
   cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
   tf.add_to_collection('losses', cross_entropy_mean)
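A runnable sketch of the corrected call, again assuming TF 1.x; the tensors here are made-up stand-ins for the model's logits and the integer class labels:

    import tensorflow as tf

    logits = tf.constant([[2.0, 0.5, -1.0],
                          [0.1, 1.5, 0.3]])       # [batch_size, num_classes]
    labels = tf.cast(tf.constant([0, 1]), tf.int64)  # class ids, not one-hot

    # sparse_softmax_cross_entropy_with_logits takes integer class indices
    # and, from TF 1.0 on, keyword-only labels/logits arguments.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')

    with tf.Session() as sess:
        print(sess.run(cross_entropy_mean))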