Commit a2de5799 authored by Vivek Rathod, committed by TF Object Detection Team

Slice groundtruth tensors to remove padding during evaluation.

PiperOrigin-RevId: 343491021
parent 1d38cca0
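
The commit's core idea: batched eval inputs pad each image's groundtruth boxes to a common maximum, so the padding rows must be sliced away before metrics or visualization consume them. A minimal sketch of that slicing with made-up tensors (not code from this commit):

import tensorflow as tf

# Hypothetical batch: 2 images, groundtruth padded to 3 boxes each.
groundtruth_boxes = tf.constant([
    [[0.0, 0.0, 0.5, 0.5], [0.1, 0.1, 0.9, 0.9], [0.0, 0.0, 0.0, 0.0]],
    [[0.2, 0.2, 0.4, 0.4], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]])
num_groundtruth_boxes = tf.constant([2, 1], dtype=tf.int32)  # true counts

for i in range(groundtruth_boxes.shape[0]):
  # Slice off the zero-padding; only the first num_groundtruth_boxes[i]
  # rows are real annotations.
  valid_boxes = groundtruth_boxes[i][:num_groundtruth_boxes[i]]
  print(valid_boxes.numpy())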
@@ -373,8 +373,9 @@ class CocoDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):
     detection_scores = eval_dict[detection_fields.detection_scores]
     detection_classes = eval_dict[detection_fields.detection_classes]
     num_gt_boxes_per_image = eval_dict.get(
-        'num_groundtruth_boxes_per_image', None)
-    num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None)
+        input_data_fields.num_groundtruth_boxes, None)
+    num_det_boxes_per_image = eval_dict.get(detection_fields.num_detections,
+                                            None)
     is_annotated = eval_dict.get('is_annotated', None)
     if groundtruth_is_crowd is None:
...
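
The hunk above swaps ad-hoc string keys for the canonical field-name constants, so the .get() lookups match the keys the eval input pipeline actually writes. A sketch of the corrected lookup, assuming the standard_fields module from the Object Detection API and a hypothetical eval_dict:

from object_detection.core import standard_fields as fields

input_data_fields = fields.InputDataFields
detection_fields = fields.DetectionResultFields

eval_dict = {}  # hypothetical; in practice filled by the eval pipeline
# 'num_groundtruth_boxes' and 'num_detections' are the canonical keys.
num_gt_boxes_per_image = eval_dict.get(
    input_data_fields.num_groundtruth_boxes, None)
num_det_boxes_per_image = eval_dict.get(
    detection_fields.num_detections, None)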
@@ -890,8 +890,7 @@ def eager_eval_loop(
       tf.logging.info('Finished eval step %d', i)
 
     use_original_images = fields.InputDataFields.original_image in features
-    if (use_original_images and i < eval_config.num_visualizations
-        and batch_size == 1):
+    if (use_original_images and i < eval_config.num_visualizations):
       sbys_image_list = vutils.draw_side_by_side_evaluation_image(
           eval_dict,
           category_index=category_index,
@@ -899,21 +898,21 @@ def eager_eval_loop(
           min_score_thresh=eval_config.min_score_threshold,
           use_normalized_coordinates=False,
           keypoint_edges=keypoint_edges or None)
-      sbys_images = tf.concat(sbys_image_list, axis=0)
-      tf.compat.v2.summary.image(
-          name='eval_side_by_side_' + str(i),
-          step=global_step,
-          data=sbys_images,
-          max_outputs=eval_config.num_visualizations)
+      for j, sbys_image in enumerate(sbys_image_list):
+        tf.compat.v2.summary.image(
+            name='eval_side_by_side_{}_{}'.format(i, j),
+            step=global_step,
+            data=sbys_image,
+            max_outputs=eval_config.num_visualizations)
       if eval_util.has_densepose(eval_dict):
         dp_image_list = vutils.draw_densepose_visualizations(
             eval_dict)
-        dp_images = tf.concat(dp_image_list, axis=0)
-        tf.compat.v2.summary.image(
-            name='densepose_detections_' + str(i),
-            step=global_step,
-            data=dp_images,
-            max_outputs=eval_config.num_visualizations)
+        for j, dp_image in enumerate(dp_image_list):
+          tf.compat.v2.summary.image(
+              name='densepose_detections_{}_{}'.format(i, j),
+              step=global_step,
+              data=dp_image,
+              max_outputs=eval_config.num_visualizations)
 
   if evaluators is None:
     if class_agnostic:
...
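
These hunks also drop the old batch_size == 1 restriction on visualization: tf.concat required every image in the list to share spatial dimensions, while writing one summary per image tolerates mixed shapes at any batch size. A standalone sketch of the per-image pattern, with dummy images and a made-up log directory:

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/eval_summaries')  # hypothetical path
image_list = [tf.zeros([1, 480, 640, 3], dtype=tf.uint8),
              tf.zeros([1, 600, 800, 3], dtype=tf.uint8)]  # shapes may differ
with writer.as_default():
  for j, image in enumerate(image_list):
    # One summary per image instead of tf.concat over the whole list.
    tf.summary.image(name='eval_side_by_side_0_{}'.format(j),
                     data=image, step=0, max_outputs=1)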
@@ -664,6 +664,10 @@ def draw_side_by_side_evaluation_image(eval_dict,
         key != input_data_fields.image_additional_channels):
       eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
 
+  num_gt_boxes = [-1] * eval_dict[input_data_fields.original_image].shape[0]
+  if input_data_fields.num_groundtruth_boxes in eval_dict:
+    num_gt_boxes = tf.cast(eval_dict[input_data_fields.num_groundtruth_boxes],
+                           tf.int32)
   for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
     instance_masks = None
     if detection_fields.detection_masks in eval_dict:
@@ -702,7 +706,6 @@ def draw_side_by_side_evaluation_image(eval_dict,
       groundtruth_keypoint_scores = tf.cast(
           keypoint_ops.set_keypoint_visibilities(
               groundtruth_keypoints), dtype=tf.float32)
-
     images_with_detections = draw_bounding_boxes_on_image_tensors(
         tf.expand_dims(
             eval_dict[input_data_fields.original_image][indx], axis=0),
@@ -725,16 +728,23 @@ def draw_side_by_side_evaluation_image(eval_dict,
         max_boxes_to_draw=max_boxes_to_draw,
         min_score_thresh=min_score_thresh,
         use_normalized_coordinates=use_normalized_coordinates)
+    num_gt_boxes_i = num_gt_boxes[indx]
     images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
         tf.expand_dims(
-            eval_dict[input_data_fields.original_image][indx], axis=0),
+            eval_dict[input_data_fields.original_image][indx],
+            axis=0),
         tf.expand_dims(
-            eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
+            eval_dict[input_data_fields.groundtruth_boxes][indx]
+            [:num_gt_boxes_i],
+            axis=0),
         tf.expand_dims(
-            eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),
+            eval_dict[input_data_fields.groundtruth_classes][indx]
+            [:num_gt_boxes_i],
+            axis=0),
         tf.expand_dims(
             tf.ones_like(
-                eval_dict[input_data_fields.groundtruth_classes][indx],
+                eval_dict[input_data_fields.groundtruth_classes][indx]
+                [:num_gt_boxes_i],
                 dtype=tf.float32),
             axis=0),
         category_index,
@@ -760,13 +770,17 @@ def draw_side_by_side_evaluation_image(eval_dict,
             eval_dict[input_data_fields.image_additional_channels][indx],
             axis=0),
         tf.expand_dims(
-            eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
+            eval_dict[input_data_fields.groundtruth_boxes][indx]
+            [:num_gt_boxes_i],
+            axis=0),
         tf.expand_dims(
-            eval_dict[input_data_fields.groundtruth_classes][indx],
+            eval_dict[input_data_fields.groundtruth_classes][indx]
+            [:num_gt_boxes_i],
             axis=0),
         tf.expand_dims(
             tf.ones_like(
-                eval_dict[input_data_fields.groundtruth_classes][indx],
+                eval_dict[input_data_fields.groundtruth_classes][indx]
+                [:num_gt_boxes_i],
                 dtype=tf.float32),
             axis=0),
         category_index,
...
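
The visualization hunks thread num_gt_boxes through the per-image loop so boxes, classes, and the dummy all-ones scores are each cut to the true groundtruth count before drawing. A small sketch of that slice with hypothetical tensors; the [:n] bound works whether n is a Python int or a scalar int32 tensor:

import tensorflow as tf

groundtruth_classes = tf.constant([1, 3, 2, 0, 0])      # padded to 5 entries
num_gt_boxes_i = tf.constant(3, dtype=tf.int32)         # true count

valid_classes = groundtruth_classes[:num_gt_boxes_i]    # -> [1, 3, 2]
scores = tf.ones_like(valid_classes, dtype=tf.float32)  # dummy scores for drawing
print(valid_classes.numpy(), scores.numpy())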