ModelZoo / ResNet50_tensorflow / Commits

Commit 645202b1
Authored Apr 11, 2019 by Dong Lin; committed Apr 11, 2019 by Toby Boyd
Parent: 17ef6405

Move metrics info from extras to metrics field in test_log.proto (#6548)
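In short: instead of JSON-encoding each metric with the private _json_description helper and stashing the string in report_benchmark's extras dict, the benchmarks now pass a list of plain metric dicts through the metrics argument, which maps onto the repeated metrics field in test_log.proto. A minimal before/after sketch (the 0.93 value is illustrative):

    import json

    # Before: each metric became a JSON string under a key in `extras`.
    extras = {'accuracy_top_1': json.dumps({'value': 0.93, 'priority': 0})}

    # After: each metric is a plain dict in a list passed as
    # report_benchmark(metrics=...), with no JSON round-trip.
    metrics = [{'name': 'accuracy_top_1', 'value': 0.93}]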
Showing 2 changed files with 20 additions and 76 deletions:

  official/resnet/estimator_cifar_benchmark.py   (+8, -35)
  official/resnet/keras/keras_benchmark.py       (+12, -41)
official/resnet/estimator_cifar_benchmark.py (view file @ 645202b1)
@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
-import json
 import os
 import time
@@ -127,48 +126,22 @@ class EstimatorCifar10BenchmarkTests(tf.test.Benchmark):
        break

    eval_results = stats['eval_results']
-    extras = {}
-    extras['accuracy_top_1'] = self._json_description(
-        eval_results['accuracy'].item(),
-        priority=0)
-    extras['accuracy_top_5'] = self._json_description(
-        eval_results['accuracy_top_5'].item())
+    metrics = []
+    metrics.append({'name': 'accuracy_top_1',
+                    'value': eval_results['accuracy'].item()})
+    metrics.append({'name': 'accuracy_top_5',
+                    'value': eval_results['accuracy_top_5'].item()})
    if examples_per_sec_hook:
      exp_per_second_list = examples_per_sec_hook.current_examples_per_sec_list
      # ExamplesPerSecondHook skips the first 10 steps.
      exp_per_sec = sum(exp_per_second_list) / (len(exp_per_second_list))
-      extras['exp_per_second'] = self._json_description(exp_per_sec)
+      metrics.append({'name': 'exp_per_second', 'value': exp_per_sec})
    self.report_benchmark(
        iters=eval_results['global_step'],
        wall_time=wall_time_sec,
-        extras=extras)
-
-  def _json_description(self,
-                        value,
-                        priority=None,
-                        min_value=None,
-                        max_value=None):
-    """Get a json-formatted string describing the attributes for a metric."""
-
-    attributes = {}
-    attributes['value'] = value
-    if priority:
-      attributes['priority'] = priority
-    if min_value:
-      attributes['min_value'] = min_value
-    if max_value:
-      attributes['max_value'] = max_value
-
-    if min_value or max_value:
-      succeeded = True
-      if min_value and value < min_value:
-        succeeded = False
-      if max_value and value > max_value:
-        succeeded = False
-      attributes['succeeded'] = succeeded
-
-    return json.dumps(attributes)
+        metrics=metrics)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)
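One behavioral note on the hunk above: the deleted _json_description computed a succeeded flag whenever min/max bounds were supplied, and no equivalent check is added here, so pass/fail evaluation presumably moves to whatever consumes the metrics list. A hypothetical stand-in (within_bounds is not part of this repo) that reproduces that logic with explicit None checks, which the old truthiness tests (if min_value:) got wrong for a bound of 0:

    def within_bounds(value, min_value=None, max_value=None):
      # Explicit None checks: the old `if min_value:` treated a
      # 0.0 bound as "no bound at all".
      if min_value is not None and value < min_value:
        return False
      if max_value is not None and value > max_value:
        return False
      return True

    assert within_bounds(0.93, min_value=0.90, max_value=0.99)
    assert not within_bounds(0.85, min_value=0.90)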
official/resnet/keras/keras_benchmark.py (view file @ 645202b1)
@@ -19,8 +19,6 @@ from __future__ import division
 from __future__ import print_function

 import os
-import time
-import json

 from absl import flags
 from absl.testing import flagsaver
@@ -77,15 +75,14 @@ class KerasBenchmark(tf.test.Benchmark):
      warmup: number of entries in stats['step_timestamp_log'] to ignore.
    """
-    extras = {}
+    metrics = []
    if 'accuracy_top_1' in stats:
-      extras['accuracy_top_1'] = self._json_description(
-          stats['accuracy_top_1'],
-          priority=0,
-          min_value=top_1_min,
-          max_value=top_1_max)
-      extras['top_1_train_accuracy'] = self._json_description(
-          stats['training_accuracy_top_1'],
-          priority=1)
+      metrics.append({'name': 'accuracy_top_1',
+                      'value': stats['accuracy_top_1'],
+                      'min_value': top_1_min,
+                      'max_value': top_1_max})
+      metrics.append({'name': 'top_1_train_accuracy',
+                      'value': stats['training_accuracy_top_1']})
    if (warmup and 'step_timestamp_log' in stats and
        len(stats['step_timestamp_log']) > warmup):
@@ -96,37 +93,11 @@ class KerasBenchmark(tf.test.Benchmark):
      num_examples = (
          total_batch_size * log_steps * (len(time_log) - warmup - 1))
      examples_per_sec = num_examples / elapsed
-      extras['exp_per_second'] = self._json_description(
-          examples_per_sec, priority=2)
+      metrics.append({'name': 'exp_per_second',
+                      'value': examples_per_sec})
    if 'avg_exp_per_second' in stats:
-      extras['avg_exp_per_second'] = self._json_description(
-          stats['avg_exp_per_second'], priority=3)
+      metrics.append({'name': 'avg_exp_per_second',
+                      'value': stats['avg_exp_per_second']})

-    self.report_benchmark(iters=-1, wall_time=wall_time_sec, extras=extras)
-
-  def _json_description(self,
-                        value,
-                        priority=None,
-                        min_value=None,
-                        max_value=None):
-    """Get a json-formatted string describing the attributes for a metric"""
-
-    attributes = {}
-    attributes['value'] = value
-    if priority:
-      attributes['priority'] = priority
-    if min_value:
-      attributes['min_value'] = min_value
-    if max_value:
-      attributes['max_value'] = max_value
-
-    if min_value or max_value:
-      succeeded = True
-      if min_value and value < min_value:
-        succeeded = False
-      if max_value and value > max_value:
-        succeeded = False
-      attributes['succeeded'] = succeeded
-
-    return json.dumps(attributes)
+    self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics)
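For context on the commit title: a sketch of how a downstream consumer might fill the repeated metrics field of BenchmarkEntry in TensorFlow's test_log.proto from these dicts. The generated-module import path and the entry name are assumptions, and only the name/value fields are populated:

    # Assumed generated module for tensorflow/core/util/test_log.proto.
    from tensorflow.core.util import test_log_pb2

    entry = test_log_pb2.BenchmarkEntry(name='resnet_benchmark')  # illustrative name
    for m in [{'name': 'accuracy_top_1', 'value': 0.93},
              {'name': 'exp_per_second', 'value': 1200.0}]:
      metric = entry.metrics.add()  # one repeated metric entry per dict
      metric.name = m['name']
      metric.value = m['value']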