Commit 645202b1 authored by Dong Lin, committed by Toby Boyd

Move metrics info from extras to metrics field in test_log.proto (#6548)

parent 17ef6405
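For context, the change replaces JSON-encoded strings stored in the extras dict with a metrics list passed to report_benchmark, where each entry is a dict carrying a 'name', a 'value', and optional 'min_value'/'max_value' bounds that land in the metrics field of test_log.proto. A minimal sketch of the new reporting style follows; the benchmark class and the numbers are illustrative only, not taken from this change:

import tensorflow as tf


class ToyBenchmark(tf.test.Benchmark):
  """Illustrative only: reports one bounded and one unbounded metric."""

  def benchmark_toy(self):
    metrics = [
        # Bounded metric: downstream tooling can compare the value against
        # min_value/max_value without parsing JSON out of `extras`.
        {'name': 'accuracy_top_1', 'value': 0.93,
         'min_value': 0.92, 'max_value': 0.94},
        # Unbounded metric: only 'name' and 'value' are required.
        {'name': 'exp_per_second', 'value': 1800.0},
    ]
    self.report_benchmark(iters=-1, wall_time=12.3, metrics=metrics)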
@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import json
 import os
 import time
 
@@ -127,48 +126,22 @@ class EstimatorCifar10BenchmarkTests(tf.test.Benchmark):
         break
 
     eval_results = stats['eval_results']
-    extras = {}
-    extras['accuracy_top_1'] = self._json_description(
-        eval_results['accuracy'].item(),
-        priority=0)
-    extras['accuracy_top_5'] = self._json_description(
-        eval_results['accuracy_top_5'].item())
+    metrics = []
+    metrics.append({'name': 'accuracy_top_1',
+                    'value': eval_results['accuracy'].item()})
+    metrics.append({'name': 'accuracy_top_5',
+                    'value': eval_results['accuracy_top_5'].item()})
 
     if examples_per_sec_hook:
       exp_per_second_list = examples_per_sec_hook.current_examples_per_sec_list
       # ExamplesPerSecondHook skips the first 10 steps.
       exp_per_sec = sum(exp_per_second_list) / (len(exp_per_second_list))
-      extras['exp_per_second'] = self._json_description(exp_per_sec)
+      metrics.append({'name': 'exp_per_second',
+                      'value': exp_per_sec})
 
     self.report_benchmark(
         iters=eval_results['global_step'],
         wall_time=wall_time_sec,
-        extras=extras)
-
-  def _json_description(self,
-                        value,
-                        priority=None,
-                        min_value=None,
-                        max_value=None):
-    """Get a json-formatted string describing the attributes for a metric."""
-    attributes = {}
-    attributes['value'] = value
-    if priority:
-      attributes['priority'] = priority
-    if min_value:
-      attributes['min_value'] = min_value
-    if max_value:
-      attributes['max_value'] = max_value
-    if min_value or max_value:
-      succeeded = True
-      if min_value and value < min_value:
-        succeeded = False
-      if max_value and value > max_value:
-        succeeded = False
-      attributes['succeeded'] = succeeded
-    return json.dumps(attributes)
+        metrics=metrics)
 
   def _get_model_dir(self, folder_name):
     return os.path.join(self.output_dir, folder_name)
@@ -19,8 +19,6 @@ from __future__ import division
 from __future__ import print_function
 
 import os
-import time
-import json
 
 from absl import flags
 from absl.testing import flagsaver
@@ -77,15 +75,14 @@ class KerasBenchmark(tf.test.Benchmark):
       warmup: number of entries in stats['step_timestamp_log'] to ignore.
     """
 
-    extras = {}
+    metrics = []
     if 'accuracy_top_1' in stats:
-      extras['accuracy_top_1'] = self._json_description(
-          stats['accuracy_top_1'],
-          priority=0,
-          min_value=top_1_min,
-          max_value=top_1_max)
-      extras['top_1_train_accuracy'] = self._json_description(
-          stats['training_accuracy_top_1'], priority=1)
+      metrics.append({'name': 'accuracy_top_1',
+                      'value': stats['accuracy_top_1'],
+                      'min_value': top_1_min,
+                      'max_value': top_1_max})
+      metrics.append({'name': 'top_1_train_accuracy',
+                      'value': stats['training_accuracy_top_1']})
 
     if (warmup and 'step_timestamp_log' in stats and
         len(stats['step_timestamp_log']) > warmup):
@@ -96,37 +93,11 @@ class KerasBenchmark(tf.test.Benchmark):
       num_examples = (
           total_batch_size * log_steps * (len(time_log) - warmup - 1))
       examples_per_sec = num_examples / elapsed
-      extras['exp_per_second'] = self._json_description(
-          examples_per_sec, priority=2)
+      metrics.append({'name': 'exp_per_second',
+                      'value': examples_per_sec})
 
     if 'avg_exp_per_second' in stats:
-      extras['avg_exp_per_second'] = self._json_description(
-          stats['avg_exp_per_second'], priority=3)
+      metrics.append({'name': 'avg_exp_per_second',
+                      'value': stats['avg_exp_per_second']})
 
-    self.report_benchmark(iters=-1, wall_time=wall_time_sec, extras=extras)
-
-  def _json_description(self,
-                        value,
-                        priority=None,
-                        min_value=None,
-                        max_value=None):
-    """Get a json-formatted string describing the attributes for a metric"""
-    attributes = {}
-    attributes['value'] = value
-    if priority:
-      attributes['priority'] = priority
-    if min_value:
-      attributes['min_value'] = min_value
-    if max_value:
-      attributes['max_value'] = max_value
-    if min_value or max_value:
-      succeeded = True
-      if min_value and value < min_value:
-        succeeded = False
-      if max_value and value > max_value:
-        succeeded = False
-      attributes['succeeded'] = succeeded
-    return json.dumps(attributes)
+    self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics)
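Note that the deleted _json_description helper also computed a 'succeeded' flag by comparing the value against the optional bounds; after this change the benchmark code only reports the value and its bounds, and any pass/fail decision is left to whatever consumes the metrics. A hedged sketch of how a consumer could re-derive that flag from a structured metric dict (the helper name here is hypothetical, not part of this change):

def metric_within_bounds(metric):
  """Re-derives the old 'succeeded' flag from a structured metric dict."""
  value = metric['value']
  if 'min_value' in metric and value < metric['min_value']:
    return False
  if 'max_value' in metric and value > metric['max_value']:
    return False
  return True


# Example: a bounded accuracy metric like the one the Keras benchmark reports.
print(metric_within_bounds(
    {'name': 'accuracy_top_1', 'value': 0.93,
     'min_value': 0.92, 'max_value': 0.94}))  # True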