"examples/cpp/vscode:/vscode.git/clone" did not exist on "06125966d7054a53458086f342734ea01dc2faf4"
Commit 00f71bf9 authored by A. Unique TensorFlower, committed by TF Object Detection Team

Image-level labels are not propagated to the open images challenge metric.

PiperOrigin-RevId: 417394640
parent 48b4b573
@@ -327,6 +327,7 @@ class DetectionModel(six.with_metaclass(abc.ABCMeta, _BaseClass)):
       groundtruth_not_exhaustive_classes=None,
       groundtruth_keypoint_depths_list=None,
       groundtruth_keypoint_depth_weights_list=None,
+      groundtruth_image_classes=None,
       training_step=None):
     """Provide groundtruth tensors.
@@ -398,6 +399,9 @@ class DetectionModel(six.with_metaclass(abc.ABCMeta, _BaseClass)):
       groundtruth_keypoint_depth_weights_list: a list of 2-D tf.float32 tensors
         of shape [num_boxes, num_keypoints] containing the weights of the
         relative depths.
+      groundtruth_image_classes: A list of 1-D tf.float32 tensors of shape
+        [num_classes], containing label indices encoded as k-hot of the classes
+        that are present or not present in the image.
       training_step: An integer denoting the current training step. This is
         useful when models want to anneal loss terms.
     """
@@ -474,6 +478,10 @@ class DetectionModel(six.with_metaclass(abc.ABCMeta, _BaseClass)):
       self._groundtruth_lists[
          fields.InputDataFields
          .groundtruth_verified_neg_classes] = groundtruth_verified_neg_classes
+    if groundtruth_image_classes:
+      self._groundtruth_lists[
+          fields.InputDataFields
+          .groundtruth_image_classes] = groundtruth_image_classes
     if groundtruth_not_exhaustive_classes:
       self._groundtruth_lists[
          fields.InputDataFields
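For reference, a minimal sketch of how a caller might build the k-hot image-level label tensor described in the new docstring entry and hand it to `provide_groundtruth`. The values, `num_classes`, and the `detection_model` instance are illustrative assumptions, not part of this change.

```python
import tensorflow as tf

num_classes = 5  # illustrative only

# Image-level verified classes 1 and 3, encoded as a k-hot vector of shape
# [num_classes], matching the docstring added above.
image_level_indices = tf.constant([1, 3])
groundtruth_image_classes = tf.reduce_max(
    tf.one_hot(image_level_indices, depth=num_classes), axis=0)

# Box-level groundtruth for a single image (made-up values).
boxes = tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32)
classes = tf.one_hot([1], depth=num_classes)

# `detection_model` is assumed to be an already-built DetectionModel subclass.
detection_model.provide_groundtruth(
    groundtruth_boxes_list=[boxes],
    groundtruth_classes_list=[classes],
    groundtruth_image_classes=[groundtruth_image_classes])
```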
@@ -668,7 +668,8 @@ def _get_labels_dict(input_dict):
       fields.InputDataFields.groundtruth_dp_surface_coords,
       fields.InputDataFields.groundtruth_track_ids,
       fields.InputDataFields.groundtruth_verified_neg_classes,
-      fields.InputDataFields.groundtruth_not_exhaustive_classes
+      fields.InputDataFields.groundtruth_not_exhaustive_classes,
+      fields.InputDataFields.groundtruth_image_classes,
   ]
   for key in optional_label_keys:
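This hunk only appends the new field to `optional_label_keys`; the loop that follows copies any optional key actually present in the decoded `input_dict` into the labels dict. A rough sketch of that pattern (the helper name and dict arguments are hypothetical, for illustration only):

```python
def _copy_optional_labels(input_dict, labels_dict, optional_label_keys):
  """Copies optional groundtruth keys into the labels dict when present."""
  for key in optional_label_keys:
    if key in input_dict:
      labels_dict[key] = input_dict[key]
  return labels_dict
```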
@@ -114,6 +114,10 @@ def _prepare_groundtruth_for_eval(detection_model, class_agnostic,
     'groundtruth_not_exhaustive_classes': [batch_size, num_classes] K-hot
       representation of 1-indexed classes which don't have all of their
       instances marked exhaustively.
+    'input_data_fields.groundtruth_image_classes': integer representation of
+      the classes that were sent for verification for a given image. Note that
+      this field does not support batching as the number of classes can be
+      variable.
     class_agnostic: Boolean indicating whether detections are class agnostic.
   """
   input_data_fields = fields.InputDataFields()
@@ -136,6 +140,18 @@ def _prepare_groundtruth_for_eval(detection_model, class_agnostic,
       input_data_fields.groundtruth_classes: groundtruth_classes
   }
+  if detection_model.groundtruth_has_field(
+      input_data_fields.groundtruth_image_classes):
+    groundtruth_image_classes_k_hot = tf.stack(
+        detection_model.groundtruth_lists(
+            input_data_fields.groundtruth_image_classes))
+    # We do not add label_id_offset here because it was not added when encoding
+    # groundtruth_image_classes.
+    groundtruth_image_classes = tf.expand_dims(
+        tf.where(groundtruth_image_classes_k_hot > 0)[:, 1], 0)
+    groundtruth[
+        input_data_fields.groundtruth_image_classes] = groundtruth_image_classes
   if detection_model.groundtruth_has_field(fields.BoxListFields.masks):
     groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack(
         detection_model.groundtruth_lists(fields.BoxListFields.masks))
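A standalone sketch of the k-hot-to-index conversion performed in the hunk above, with made-up values; it also shows why no `label_id_offset` is added at this point.

```python
import tensorflow as tf

# One image whose image-level k-hot vector marks classes 1 and 3 as verified.
groundtruth_image_classes_k_hot = tf.constant([[0., 1., 0., 1., 0.]])

# tf.where on a 2-D tensor returns [row, column] index pairs; the column is
# the class index, already in the same (un-offset) space as the k-hot
# encoding, so no label_id_offset is added here.
class_indices = tf.where(groundtruth_image_classes_k_hot > 0)[:, 1]
groundtruth_image_classes = tf.expand_dims(class_indices, 0)
# groundtruth_image_classes -> [[1, 3]] with shape [1, 2]
```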
@@ -384,6 +400,10 @@ def provide_groundtruth(model, labels, training_step=None):
   if fields.InputDataFields.groundtruth_not_exhaustive_classes in labels:
     gt_not_exhaustive_classes = labels[
         fields.InputDataFields.groundtruth_not_exhaustive_classes]
+  groundtruth_image_classes = None
+  if fields.InputDataFields.groundtruth_image_classes in labels:
+    groundtruth_image_classes = labels[
+        fields.InputDataFields.groundtruth_image_classes]
   model.provide_groundtruth(
       groundtruth_boxes_list=gt_boxes_list,
       groundtruth_classes_list=gt_classes_list,
@@ -405,6 +425,7 @@ def provide_groundtruth(model, labels, training_step=None):
       groundtruth_not_exhaustive_classes=gt_not_exhaustive_classes,
       groundtruth_keypoint_depths_list=gt_keypoint_depths_list,
       groundtruth_keypoint_depth_weights_list=gt_keypoint_depth_weights_list,
+      groundtruth_image_classes=groundtruth_image_classes,
       training_step=training_step)
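To illustrate the path through this `provide_groundtruth` wrapper, here is a hedged sketch of a labels dict that would now carry the new key through to `model.provide_groundtruth`. The tensor values are invented and the dict is heavily simplified; the real labels dict is produced by the input pipeline and contains many more keys.

```python
import tensorflow as tf
from object_detection.core import standard_fields as fields

num_classes = 5  # illustrative only

labels = {
    fields.InputDataFields.groundtruth_boxes: [
        tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32)],
    fields.InputDataFields.groundtruth_classes: [
        tf.one_hot([1], depth=num_classes)],
    # New in this change: image-level k-hot labels, one tensor per image.
    fields.InputDataFields.groundtruth_image_classes: [
        tf.constant([0., 1., 0., 1., 0.])],
}
# provide_groundtruth(model, labels) would now pick up the image-level key and
# forward it as groundtruth_image_classes=... (see the hunk above).
```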
@@ -870,6 +870,9 @@ class OpenImagesChallengeEvaluator(OpenImagesDetectionEvaluator):
       image_classes = groundtruth_dict[input_fields.groundtruth_image_classes]
     elif input_fields.groundtruth_labeled_classes in groundtruth_dict:
       image_classes = groundtruth_dict[input_fields.groundtruth_labeled_classes]
+    else:
+      logging.warning('No image classes field found for image with id %s!',
+                      image_id)
     image_classes -= self._label_id_offset
     self._evaluatable_labels[image_id] = np.unique(
         np.concatenate((image_classes, groundtruth_classes)))
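Finally, a small numpy sketch of how the evaluator above folds image-level labels into the per-image set of evaluatable classes once a labels field is found. The concrete values and the offset of 1 are illustrative.

```python
import numpy as np

label_id_offset = 1  # image-level labels assumed 1-indexed in this example

image_classes = np.array([1, 3, 7])     # image-level (verified) labels
groundtruth_classes = np.array([0, 2])  # box-level labels, already offset

image_classes = image_classes - label_id_offset
evaluatable_labels = np.unique(
    np.concatenate((image_classes, groundtruth_classes)))
# evaluatable_labels -> array([0, 2, 6]); detections for classes outside this
# set are typically skipped by the challenge metric.
```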