Commit a2de5799 authored by Vivek Rathod, committed by TF Object Detection Team

Slice groundtruth tensors to remove padding during evaluation.

PiperOrigin-RevId: 343491021
parent 1d38cca0
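
The diff below trims zero-padded groundtruth tensors down to each image's real box count before they are visualized or handed to the evaluator. As a minimal sketch of the padding/slicing idea (toy tensors, not taken from the commit):

import tensorflow as tf

# Groundtruth boxes are padded to a fixed max length when images are batched;
# the per-image count says how many rows are real annotations.
padded_boxes = tf.constant([[0.1, 0.1, 0.5, 0.5],
                            [0.2, 0.2, 0.6, 0.6],
                            [0.0, 0.0, 0.0, 0.0]])  # last row is padding
num_gt_boxes = tf.constant(2, dtype=tf.int32)       # real boxes in this image

real_boxes = padded_boxes[:num_gt_boxes]            # slice the padding away
print(real_boxes.shape)                             # (2, 4)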
@@ -373,8 +373,9 @@ class CocoDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):
     detection_scores = eval_dict[detection_fields.detection_scores]
     detection_classes = eval_dict[detection_fields.detection_classes]
     num_gt_boxes_per_image = eval_dict.get(
-        'num_groundtruth_boxes_per_image', None)
-    num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None)
+        input_data_fields.num_groundtruth_boxes, None)
+    num_det_boxes_per_image = eval_dict.get(detection_fields.num_detections,
+                                            None)
     is_annotated = eval_dict.get('is_annotated', None)
     if groundtruth_is_crowd is None:
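
For context, the hunk above swaps ad-hoc string keys for the canonical field names defined in the Object Detection API's standard_fields module. A small, hedged illustration of reading those per-image counts from an eval_dict (toy values; assumes the object_detection package is importable):

from object_detection.core import standard_fields as fields

input_data_fields = fields.InputDataFields
detection_fields = fields.DetectionResultFields

# Toy eval_dict keyed by the canonical names rather than hand-written strings.
eval_dict = {
    input_data_fields.num_groundtruth_boxes: [3, 1],  # per-image groundtruth counts
    detection_fields.num_detections: [100, 100],      # per-image detection counts
}

num_gt_boxes_per_image = eval_dict.get(
    input_data_fields.num_groundtruth_boxes, None)
num_det_boxes_per_image = eval_dict.get(
    detection_fields.num_detections, None)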
@@ -890,8 +890,7 @@ def eager_eval_loop(
       tf.logging.info('Finished eval step %d', i)
     use_original_images = fields.InputDataFields.original_image in features
-    if (use_original_images and i < eval_config.num_visualizations
-        and batch_size == 1):
+    if (use_original_images and i < eval_config.num_visualizations):
       sbys_image_list = vutils.draw_side_by_side_evaluation_image(
           eval_dict,
           category_index=category_index,
@@ -899,20 +898,20 @@ def eager_eval_loop(
           min_score_thresh=eval_config.min_score_threshold,
           use_normalized_coordinates=False,
           keypoint_edges=keypoint_edges or None)
-      sbys_images = tf.concat(sbys_image_list, axis=0)
+      for j, sbys_image in enumerate(sbys_image_list):
         tf.compat.v2.summary.image(
-            name='eval_side_by_side_' + str(i),
+            name='eval_side_by_side_{}_{}'.format(i, j),
             step=global_step,
-            data=sbys_images,
+            data=sbys_image,
             max_outputs=eval_config.num_visualizations)
       if eval_util.has_densepose(eval_dict):
         dp_image_list = vutils.draw_densepose_visualizations(
             eval_dict)
-        dp_images = tf.concat(dp_image_list, axis=0)
+        for j, dp_image in enumerate(dp_image_list):
           tf.compat.v2.summary.image(
-              name='densepose_detections_' + str(i),
+              name='densepose_detections_{}_{}'.format(i, j),
               step=global_step,
-              data=dp_images,
+              data=dp_image,
               max_outputs=eval_config.num_visualizations)
     if evaluators is None:
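
The eager_eval_loop change above drops the batch_size == 1 restriction and writes one image summary per element of the visualization list instead of concatenating the list into a single batched tensor. A sketch of that per-image summary pattern (writer path, step, and shapes are illustrative only):

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/eval_summaries')  # hypothetical path
global_step = tf.constant(0, dtype=tf.int64)
# Stand-ins for draw_side_by_side_evaluation_image outputs: rank-4 [1, h, w, 3].
sbys_image_list = [tf.zeros([1, 64, 128, 3]), tf.zeros([1, 64, 128, 3])]

with writer.as_default():
  i = 0  # eval step index
  for j, sbys_image in enumerate(sbys_image_list):
    tf.summary.image(
        name='eval_side_by_side_{}_{}'.format(i, j),  # unique tag per image
        data=sbys_image,
        step=global_step,
        max_outputs=1)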
@@ -664,6 +664,10 @@ def draw_side_by_side_evaluation_image(eval_dict,
         key != input_data_fields.image_additional_channels):
       eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
+  num_gt_boxes = [-1] * eval_dict[input_data_fields.original_image].shape[0]
+  if input_data_fields.num_groundtruth_boxes in eval_dict:
+    num_gt_boxes = tf.cast(eval_dict[input_data_fields.num_groundtruth_boxes],
+                           tf.int32)
   for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
     instance_masks = None
     if detection_fields.detection_masks in eval_dict:
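
The lines added above give draw_side_by_side_evaluation_image a per-image groundtruth count: a default of -1 for every image in the batch, replaced by the actual counts (cast to int32) when num_groundtruth_boxes is present in eval_dict. A toy sketch of that lookup, using a plain string key in place of input_data_fields.num_groundtruth_boxes for brevity:

import tensorflow as tf

batch_size = 2
eval_dict = {'num_groundtruth_boxes': tf.constant([3, 1])}  # per-image counts

num_gt_boxes = [-1] * batch_size  # fallback when counts are absent
if 'num_groundtruth_boxes' in eval_dict:
  num_gt_boxes = tf.cast(eval_dict['num_groundtruth_boxes'], tf.int32)

for indx in range(batch_size):
  num_gt_boxes_i = num_gt_boxes[indx]  # scalar count for image indx
  print(int(num_gt_boxes_i))           # 3, then 1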
@@ -702,7 +706,6 @@ def draw_side_by_side_evaluation_image(eval_dict,
       groundtruth_keypoint_scores = tf.cast(
           keypoint_ops.set_keypoint_visibilities(
               groundtruth_keypoints), dtype=tf.float32)
-
     images_with_detections = draw_bounding_boxes_on_image_tensors(
         tf.expand_dims(
             eval_dict[input_data_fields.original_image][indx], axis=0),
@@ -725,16 +728,23 @@ def draw_side_by_side_evaluation_image(eval_dict,
         max_boxes_to_draw=max_boxes_to_draw,
         min_score_thresh=min_score_thresh,
         use_normalized_coordinates=use_normalized_coordinates)
+    num_gt_boxes_i = num_gt_boxes[indx]
     images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
         tf.expand_dims(
-            eval_dict[input_data_fields.original_image][indx], axis=0),
+            eval_dict[input_data_fields.original_image][indx],
+            axis=0),
         tf.expand_dims(
-            eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
+            eval_dict[input_data_fields.groundtruth_boxes][indx]
+            [:num_gt_boxes_i],
+            axis=0),
         tf.expand_dims(
-            eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),
+            eval_dict[input_data_fields.groundtruth_classes][indx]
+            [:num_gt_boxes_i],
+            axis=0),
         tf.expand_dims(
             tf.ones_like(
-                eval_dict[input_data_fields.groundtruth_classes][indx],
+                eval_dict[input_data_fields.groundtruth_classes][indx]
+                [:num_gt_boxes_i],
                 dtype=tf.float32),
             axis=0),
         category_index,
@@ -760,13 +770,17 @@ def draw_side_by_side_evaluation_image(eval_dict,
             eval_dict[input_data_fields.image_additional_channels][indx],
             axis=0),
         tf.expand_dims(
-            eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
+            eval_dict[input_data_fields.groundtruth_boxes][indx]
+            [:num_gt_boxes_i],
+            axis=0),
         tf.expand_dims(
-            eval_dict[input_data_fields.groundtruth_classes][indx],
+            eval_dict[input_data_fields.groundtruth_classes][indx]
+            [:num_gt_boxes_i],
             axis=0),
         tf.expand_dims(
             tf.ones_like(
-                eval_dict[input_data_fields.groundtruth_classes][indx],
+                eval_dict[input_data_fields.groundtruth_classes][indx]
+                [:num_gt_boxes_i],
                 dtype=tf.float32),
             axis=0),
         category_index,
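
Taken together, the visualization hunks index one image out of the batch, slice its groundtruth boxes and classes down to num_gt_boxes_i, and rebuild a batch of one with tf.expand_dims before drawing. A condensed, hedged sketch of that per-image slice-and-rebatch step (toy shapes; the drawing call is replaced by shape checks):

import tensorflow as tf

# Toy padded batch: 2 images, each padded to 3 groundtruth boxes.
gt_boxes = tf.random.uniform([2, 3, 4])
gt_classes = tf.constant([[1, 2, 0], [3, 0, 0]], dtype=tf.float32)
num_gt_boxes = tf.constant([2, 1], dtype=tf.int32)

indx = 0
num_gt_boxes_i = num_gt_boxes[indx]

# Slice away the padding, then restore a leading batch dimension of one,
# mirroring the tf.expand_dims(...[indx][:num_gt_boxes_i], axis=0) pattern above.
boxes_i = tf.expand_dims(gt_boxes[indx][:num_gt_boxes_i], axis=0)
classes_i = tf.expand_dims(gt_classes[indx][:num_gt_boxes_i], axis=0)
scores_i = tf.expand_dims(
    tf.ones_like(gt_classes[indx][:num_gt_boxes_i], dtype=tf.float32), axis=0)

print(boxes_i.shape, classes_i.shape, scores_i.shape)  # (1, 2, 4) (1, 2) (1, 2)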