ModelZoo / ResNet50_tensorflow · Commits

Commit 4c872f63
Authored Nov 21, 2019 by Sai Ganesh Bandiatmakuri
Committed by A. Unique TensorFlower, Nov 21, 2019

Internal change

PiperOrigin-RevId: 281846531
Parent: 275afa5c
Changes: 1 changed file, with 83 additions and 0 deletions

official/utils/testing/benchmark_wrappers.py (new file, mode 100644): +83 −0
# Lint as: python3
"""Utils to annotate and trace benchmarks."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import flags
from absl import logging
from absl.testing import flagsaver

FLAGS = flags.FLAGS

flags.DEFINE_multi_string(
    'benchmark_method_flags', None,
    'Optional list of runtime flags of the form key=value. Specify '
    'multiple times to specify different flags. These will override the FLAGS '
    'object directly after hardcoded settings in individual benchmark methods '
    'before they call _run_and_report_benchmark. For example, if we set '
    '--benchmark_method_flags=train_steps=10 and a benchmark method hardcodes '
    'FLAGS.train_steps=10000 and later calls _run_and_report_benchmark, '
    'it\'ll only run for 10 steps. This is useful for '
    'debugging/profiling workflows.')


def enable_runtime_flags(decorated_func):
  """Sets attributes from --benchmark_method_flags for method execution.

  The @enable_runtime_flags decorator temporarily adds flags passed in via
  --benchmark_method_flags and runs the decorated function in that context.

  A user can set --benchmark_method_flags=train_steps=5 to run the benchmark
  method in the snippet below with FLAGS.train_steps=5 for debugging (without
  modifying the benchmark code).

  class ModelBenchmark():

    @benchmark_wrappers.enable_runtime_flags
    def _run_and_report_benchmark(self):
      # run benchmark ...
      # report benchmark results ...

    def benchmark_method(self):
      FLAGS.train_steps = 1000
      ...
      self._run_and_report_benchmark()

  Args:
    decorated_func: The method that runs the benchmark after previous setup
      execution that set some flags.

  Returns:
    new_func: The same method which executes in a temporary context where flag
      overrides from --benchmark_method_flags are active.
  """

  def runner(*args, **kwargs):
    """Creates a temporary context to activate --benchmark_method_flags."""
    if FLAGS.benchmark_method_flags:
      saved_flag_values = flagsaver.save_flag_values()
      for key_value in FLAGS.benchmark_method_flags:
        key, value = key_value.split('=', 1)
        try:
          numeric_float = float(value)
          numeric_int = int(numeric_float)
          if abs(numeric_int) == abs(numeric_float):
            flag_value = numeric_int
          else:
            flag_value = numeric_float
        except ValueError:
          flag_value = value
        logging.info('Setting --%s=%s', key, flag_value)
        setattr(FLAGS, key, flag_value)
    else:
      saved_flag_values = None
    try:
      result = decorated_func(*args, **kwargs)
      return result
    finally:
      if saved_flag_values:
        flagsaver.restore_flag_values(saved_flag_values)

  return runner
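For reference, below is a minimal, self-contained sketch of how this decorator is meant to be used. The DummyBenchmark class, its benchmark_short method, and the train_steps flag are illustrative assumptions for this sketch only; the committed file itself only assumes a benchmark class shaped like the ModelBenchmark example in its docstring, and it must be importable as official.utils.testing.benchmark_wrappers for the import here to work.

# Illustrative usage sketch (not part of this commit). DummyBenchmark and
# the train_steps flag are hypothetical names used only for this example.
from absl import flags

from official.utils.testing import benchmark_wrappers

FLAGS = flags.FLAGS
flags.DEFINE_integer('train_steps', 10000, 'Hypothetical flag for the sketch.')


class DummyBenchmark(object):

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self):
    # With --benchmark_method_flags=train_steps=10, the override is active
    # here, so this prints 10 rather than the hardcoded 1000.
    print('running for', FLAGS.train_steps, 'steps')

  def benchmark_short(self):
    FLAGS.train_steps = 1000  # hardcoded benchmark setting
    self._run_and_report_benchmark()


if __name__ == '__main__':
  # Equivalent to passing the flag on the command line.
  FLAGS(['prog', '--benchmark_method_flags=train_steps=10'])
  DummyBenchmark().benchmark_short()
  # flagsaver restores the values saved on entry to the decorated call,
  # so the hardcoded 1000 is back in effect here.
  print('after the benchmark:', FLAGS.train_steps)

Note how runner() coerces override values: 'train_steps=10' becomes the int 10, a value such as 'learning_rate=0.5' would stay a float, and non-numeric values fall through the ValueError branch and remain strings. The flagsaver snapshot is taken only when overrides are present, and is restored in the finally block even if the benchmark raises.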