Commit 645202b1 authored by Dong Lin, committed by Toby Boyd

Move metrics info from extras to metrics field in test_log.proto (#6548)

parent 17ef6405
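
The diff below removes the per-benchmark _json_description helper, which serialized each metric's attributes into a JSON string and passed it through report_benchmark's `extras` dict, and instead hands a list of plain metric dicts to the `metrics` argument so the values can land in the structured metrics field of test_log.proto. A minimal before/after sketch of the call shape, assuming the `metrics` keyword of tf.test.Benchmark.report_benchmark that the new code relies on; the class name, method names, and accuracy value here are illustrative only, not part of the commit:

import json

import tensorflow as tf


class SketchBenchmark(tf.test.Benchmark):
  """Hypothetical benchmark used only to contrast the two reporting styles."""

  def report_old_style(self, accuracy, wall_time_sec):
    # Before: metric attributes were packed into JSON strings under `extras`,
    # with bounds/priority handling in each benchmark's _json_description.
    extras = {'accuracy_top_1': json.dumps({'value': accuracy, 'priority': 0})}
    self.report_benchmark(iters=-1, wall_time=wall_time_sec, extras=extras)

  def report_new_style(self, accuracy, wall_time_sec):
    # After: each metric is a dict with 'name'/'value' (optionally
    # 'min_value'/'max_value'), passed via `metrics` so it can populate the
    # metrics field of test_log.proto.
    metrics = [{'name': 'accuracy_top_1', 'value': accuracy}]
    self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics)
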
@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
-import json
 import os
 import time
@@ -127,48 +126,22 @@ class EstimatorCifar10BenchmarkTests(tf.test.Benchmark):
         break
     eval_results = stats['eval_results']
-    extras = {}
-    extras['accuracy_top_1'] = self._json_description(
-        eval_results['accuracy'].item(),
-        priority=0)
-    extras['accuracy_top_5'] = self._json_description(
-        eval_results['accuracy_top_5'].item())
+    metrics = []
+    metrics.append({'name': 'accuracy_top_1',
+                    'value': eval_results['accuracy'].item()})
+    metrics.append({'name': 'accuracy_top_5',
+                    'value': eval_results['accuracy_top_5'].item()})
     if examples_per_sec_hook:
       exp_per_second_list = examples_per_sec_hook.current_examples_per_sec_list
       # ExamplesPerSecondHook skips the first 10 steps.
       exp_per_sec = sum(exp_per_second_list) / (len(exp_per_second_list))
-      extras['exp_per_second'] = self._json_description(exp_per_sec)
+      metrics.append({'name': 'exp_per_second',
+                      'value': exp_per_sec})
     self.report_benchmark(
         iters=eval_results['global_step'],
         wall_time=wall_time_sec,
-        extras=extras)
-  def _json_description(self,
-                        value,
-                        priority=None,
-                        min_value=None,
-                        max_value=None):
-    """Get a json-formatted string describing the attributes for a metric."""
-    attributes = {}
-    attributes['value'] = value
-    if priority:
-      attributes['priority'] = priority
-    if min_value:
-      attributes['min_value'] = min_value
-    if max_value:
-      attributes['max_value'] = max_value
-    if min_value or max_value:
-      succeeded = True
-      if min_value and value < min_value:
-        succeeded = False
-      if max_value and value > max_value:
-        succeeded = False
-      attributes['succeeded'] = succeeded
-    return json.dumps(attributes)
+        metrics=metrics)
   def _get_model_dir(self, folder_name):
     return os.path.join(self.output_dir, folder_name)
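
Read on its own, the Estimator change above amounts to the following standalone sketch (the function name and the fabricated inputs are hypothetical, not part of the commit): .item() converts the NumPy scalars returned by Estimator.evaluate() into plain Python floats, and because ExamplesPerSecondHook skips the first 10 steps, a simple mean over its current_examples_per_sec_list gives a steady-state examples-per-second figure.

import numpy as np


def build_estimator_metrics(eval_results, exp_per_second_list):
  """Assembles the metrics list the way the Estimator benchmark above does."""
  metrics = [
      {'name': 'accuracy_top_1', 'value': eval_results['accuracy'].item()},
      {'name': 'accuracy_top_5',
       'value': eval_results['accuracy_top_5'].item()},
  ]
  if exp_per_second_list:
    metrics.append({'name': 'exp_per_second',
                    'value': sum(exp_per_second_list) / len(exp_per_second_list)})
  return metrics


# Example with fabricated evaluation results:
print(build_estimator_metrics(
    {'accuracy': np.float32(0.93), 'accuracy_top_5': np.float32(0.997)},
    [1200.0, 1250.0]))
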
@@ -19,8 +19,6 @@ from __future__ import division
 from __future__ import print_function
 import os
 import time
-import json
 from absl import flags
 from absl.testing import flagsaver
@@ -77,15 +75,14 @@ class KerasBenchmark(tf.test.Benchmark):
       warmup: number of entries in stats['step_timestamp_log'] to ignore.
     """
-    extras = {}
+    metrics = []
     if 'accuracy_top_1' in stats:
-      extras['accuracy_top_1'] = self._json_description(
-          stats['accuracy_top_1'],
-          priority=0,
-          min_value=top_1_min,
-          max_value=top_1_max)
-      extras['top_1_train_accuracy'] = self._json_description(
-          stats['training_accuracy_top_1'], priority=1)
+      metrics.append({'name': 'accuracy_top_1',
+                      'value': stats['accuracy_top_1'],
+                      'min_value': top_1_min,
+                      'max_value': top_1_max})
+      metrics.append({'name': 'top_1_train_accuracy',
+                      'value': stats['training_accuracy_top_1']})
     if (warmup and 'step_timestamp_log' in stats and
         len(stats['step_timestamp_log']) > warmup):
@@ -96,37 +93,11 @@ class KerasBenchmark(tf.test.Benchmark):
       num_examples = (
          total_batch_size * log_steps * (len(time_log) - warmup - 1))
       examples_per_sec = num_examples / elapsed
-      extras['exp_per_second'] = self._json_description(
-          examples_per_sec, priority=2)
+      metrics.append({'name': 'exp_per_second',
+                      'value': examples_per_sec})
     if 'avg_exp_per_second' in stats:
-      extras['avg_exp_per_second'] = self._json_description(
-          stats['avg_exp_per_second'], priority=3)
+      metrics.append({'name': 'avg_exp_per_second',
+                      'value': stats['avg_exp_per_second']})
-    self.report_benchmark(iters=-1, wall_time=wall_time_sec, extras=extras)
-  def _json_description(self,
-                        value,
-                        priority=None,
-                        min_value=None,
-                        max_value=None):
-    """Get a json-formatted string describing the attributes for a metric"""
-    attributes = {}
-    attributes['value'] = value
-    if priority:
-      attributes['priority'] = priority
-    if min_value:
-      attributes['min_value'] = min_value
-    if max_value:
-      attributes['max_value'] = max_value
-    if min_value or max_value:
-      succeeded = True
-      if min_value and value < min_value:
-        succeeded = False
-      if max_value and value > max_value:
-        succeeded = False
-      attributes['succeeded'] = succeeded
-    return json.dumps(attributes)
+    self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics)
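
In the Keras benchmark, the accuracy bounds that _json_description used to fold into a local `succeeded` flag now travel with the metric as 'min_value'/'max_value', leaving any pass/fail decision to whatever consumes test_log.proto. A hypothetical helper (not part of this commit) showing how that check maps onto the new metric dicts:

def metric_within_bounds(metric):
  """Re-creates the bound check the deleted _json_description performed.

  `metric` is shaped like the entries built above, e.g.
  {'name': 'accuracy_top_1', 'value': 0.93,
   'min_value': 0.92, 'max_value': 0.94}.
  """
  value = metric['value']
  min_value = metric.get('min_value')
  max_value = metric.get('max_value')
  if min_value is not None and value < min_value:
    return False
  if max_value is not None and value > max_value:
    return False
  return True


# Example: an accuracy metric that falls inside its expected range.
assert metric_within_bounds(
    {'name': 'accuracy_top_1', 'value': 0.93,
     'min_value': 0.92, 'max_value': 0.94})
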