ModelZoo / ResNet50_tensorflow / Commits / b548c7fd

Commit b548c7fd, authored Jul 06, 2020 by A. Unique TensorFlower

Replace tf.to_float, tf.to_int with tf.cast

PiperOrigin-RevId: 319744469
parent 6c63efed
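The change is mechanical: tf.to_float(x) and tf.to_int32(x) were thin convenience wrappers, deprecated in TF 1.x and removed in TF 2.x, and tf.cast(x, dtype) is the drop-in replacement. A minimal sketch of the equivalence (the tensor values are illustrative, not from this file):

import tensorflow.compat.v1 as tf

labels = tf.constant([[3, 0, 7]])

# Deprecated spellings removed by this commit:
#   tf.to_float(tf.not_equal(labels, 0))
#   tf.to_int32(labels)
# Equivalent tf.cast spellings used throughout the diff:
weights = tf.cast(tf.not_equal(labels, 0), tf.float32)  # bool -> float32
ids = tf.cast(labels, tf.int32)                         # any int dtype -> int32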
Showing 1 changed file with 20 additions and 19 deletions (+20 / -19)

official/nlp/transformer/utils/metrics.py
@@ -67,7 +67,7 @@ def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
     # Calculate smoothing cross entropy
     with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
       confidence = 1.0 - smoothing
-      low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
+      low_confidence = (1.0 - confidence) / tf.cast(vocab_size - 1, tf.float32)
       soft_targets = tf.one_hot(
           tf.cast(labels, tf.int32),
           depth=vocab_size,
@@ -79,11 +79,11 @@ def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
       # Calculate the best (lowest) possible value of cross entropy, and
       # subtract from the cross entropy loss.
       normalizing_constant = -(
-          confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
-          low_confidence * tf.log(low_confidence + 1e-20))
+          confidence * tf.log(confidence) + tf.cast(vocab_size - 1, tf.float32)
+          * low_confidence * tf.log(low_confidence + 1e-20))
       xentropy -= normalizing_constant

-    weights = tf.to_float(tf.not_equal(labels, 0))
+    weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
     return xentropy * weights, weights
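A note on the hunk above: with label smoothing, the cross entropy against soft targets can never reach zero; its minimum is the entropy of the smoothed target distribution, which the code computes as normalizing_constant and subtracts. A sketch of the same arithmetic in plain Python (math.log standing in for tf.log; the smoothing and vocab values are made up for illustration):

import math

smoothing, vocab_size = 0.1, 6
confidence = 1.0 - smoothing                            # mass on the true class
low_confidence = (1.0 - confidence) / (vocab_size - 1)  # mass on each wrong class

# Minimum achievable cross entropy = entropy of the smoothed target; the 1e-20
# guard mirrors the diff and avoids log(0) when smoothing == 0.
normalizing_constant = -(
    confidence * math.log(confidence) +
    (vocab_size - 1) * low_confidence * math.log(low_confidence + 1e-20))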
@@ -142,24 +142,24 @@ def padded_accuracy(logits, labels):
   """Percentage of times that predictions matches labels on non-0s."""
   with tf.variable_scope("padded_accuracy", values=[logits, labels]):
     logits, labels = _pad_tensors_to_same_length(logits, labels)
-    weights = tf.to_float(tf.not_equal(labels, 0))
-    outputs = tf.to_int32(tf.argmax(logits, axis=-1))
-    padded_labels = tf.to_int32(labels)
-    return tf.to_float(tf.equal(outputs, padded_labels)), weights
+    weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
+    outputs = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
+    padded_labels = tf.cast(labels, tf.int32)
+    return tf.cast(tf.equal(outputs, padded_labels), tf.float32), weights


 def padded_accuracy_topk(logits, labels, k):
   """Percentage of times that top-k predictions matches labels on non-0s."""
   with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
     logits, labels = _pad_tensors_to_same_length(logits, labels)
-    weights = tf.to_float(tf.not_equal(labels, 0))
+    weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
     effective_k = tf.minimum(k, tf.shape(logits)[-1])
     _, outputs = tf.nn.top_k(logits, k=effective_k)
-    outputs = tf.to_int32(outputs)
-    padded_labels = tf.to_int32(labels)
+    outputs = tf.cast(outputs, tf.int32)
+    padded_labels = tf.cast(labels, tf.int32)
     padded_labels = tf.expand_dims(padded_labels, axis=-1)
     padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
-    same = tf.to_float(tf.equal(outputs, padded_labels))
+    same = tf.cast(tf.equal(outputs, padded_labels), tf.float32)
     same_topk = tf.reduce_sum(same, axis=-1)
     return same_topk, weights
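For readers skimming the padded_accuracy_topk hunk: the expand_dims / zeros_like pair is a broadcasting trick that tiles each label against its k candidate outputs, so tf.equal can test membership in the top k. A small sketch with assumed toy shapes:

import tensorflow.compat.v1 as tf

outputs = tf.constant([[5, 2, 9]])   # top-3 predicted ids, shape [1, 3]
labels = tf.constant([2])            # true ids, shape [1]

padded_labels = tf.expand_dims(tf.cast(labels, tf.int32), axis=-1)  # [1, 1]
padded_labels += tf.zeros_like(outputs)              # broadcast up to [1, 3]
same = tf.cast(tf.equal(outputs, padded_labels), tf.float32)  # [[0., 1., 0.]]
same_topk = tf.reduce_sum(same, axis=-1)             # 1.0 iff label is in top k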
@@ -172,10 +172,11 @@ def padded_sequence_accuracy(logits, labels):
   """Percentage of times that predictions matches labels everywhere (non-0)."""
   with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
     logits, labels = _pad_tensors_to_same_length(logits, labels)
-    weights = tf.to_float(tf.not_equal(labels, 0))
-    outputs = tf.to_int32(tf.argmax(logits, axis=-1))
-    padded_labels = tf.to_int32(labels)
-    not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
+    weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
+    outputs = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
+    padded_labels = tf.cast(labels, tf.int32)
+    not_correct = (
+        tf.cast(tf.not_equal(outputs, padded_labels), tf.float32) * weights)
     axis = list(range(1, len(outputs.get_shape())))
     correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
     return correct_seq, tf.constant(1.0)
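And on padded_sequence_accuracy: not_correct is non-zero wherever a non-padding position is mispredicted, so summing it over the non-batch axes and clamping with tf.minimum gives exact-match scoring per sequence. A worked sketch with assumed values:

import tensorflow.compat.v1 as tf

not_correct = tf.constant([[0., 1., 0., 1.],    # one or more errors
                           [0., 0., 0., 0.]])   # exact match
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=1))
# -> [0.0, 1.0]: a sequence scores only if every non-padding token matches.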
@@ -201,7 +202,7 @@ def bleu_score(logits, labels):
   Returns:
     bleu: int, approx bleu score
   """
-  predictions = tf.to_int32(tf.argmax(logits, axis=-1))
+  predictions = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
   # TODO: Look into removing use of py_func
   bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
   return bleu, tf.constant(1.0)
@@ -306,7 +307,7 @@ def rouge_2_fscore(logits, labels):
   Returns:
     rouge2_fscore: approx rouge-2 f1 score.
   """
-  predictions = tf.to_int32(tf.argmax(logits, axis=-1))
+  predictions = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
   # TODO: Look into removing use of py_func
   rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
   return rouge_2_f_score, tf.constant(1.0)
@@ -383,7 +384,7 @@ def rouge_l_fscore(predictions, labels):
   Returns:
     rouge_l_fscore: approx rouge-l f1 score.
   """
-  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
+  outputs = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
   rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
                                tf.float32)
   return rouge_l_f_score, tf.constant(1.0)