"tests/L0/vscode:/vscode.git/clone" did not exist on "c3d4bfe8224967baef120be1ce6a8a0cc82c12ea"
Commit e46021a7 authored by Zhichao Lu's avatar Zhichao Lu Committed by pkulzc
Browse files

Apply fix for image summary in eval metrics, so that eval on train works appropriately.

PiperOrigin-RevId: 189815553
parent 7007d9e3
......@@ -362,6 +362,7 @@ def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False):
else:
category_index = label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path)
img_summary = None
if not use_tpu and use_original_images:
detection_and_groundtruth = (
vis_utils.draw_side_by_side_evaluation_image(
......@@ -378,8 +379,9 @@ def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False):
eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
eval_metrics, category_index.values(), eval_dict,
include_metrics_per_category=False)
eval_metric_ops['Detections_Left_Groundtruth_Right'] = (
img_summary, tf.no_op())
if img_summary is not None:
eval_metric_ops['Detections_Left_Groundtruth_Right'] = (
img_summary, tf.no_op())
if use_tpu:
return tf.contrib.tpu.TPUEstimatorSpec(
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment