Commit 2207d2e0 authored by Zhichao Lu, committed by lzc5123016

Pass in is_crowd for coco evaluation.

PiperOrigin-RevId: 187527188
parent 56abd776
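
As context for the diff below, here is a minimal usage sketch (not part of the commit) showing how a caller could supply the new optional is_crowd groundtruth field to the evaluator. It mirrors the tests added in this change; the import paths, category list, image id, and box/score values are illustrative assumptions.

# Sketch only: exercising the new optional groundtruth_is_crowd field.
# Import paths and all concrete values are assumptions for illustration.
import numpy as np
from object_detection.core import standard_fields
from object_detection.metrics import coco_evaluation

categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
evaluator.add_single_ground_truth_image_info(
    image_id='image1',
    groundtruth_dict={
        standard_fields.InputDataFields.groundtruth_boxes:
            np.array([[100., 100., 200., 200.], [50., 50., 100., 100.]]),
        standard_fields.InputDataFields.groundtruth_classes:
            np.array([1, 2]),
        # A value of 1 marks a crowd region; COCO evaluation does not
        # penalize detections that match crowd groundtruth.
        standard_fields.InputDataFields.groundtruth_is_crowd:
            np.array([0, 1])
    })
evaluator.add_single_detected_image_info(
    image_id='image1',
    detections_dict={
        standard_fields.DetectionResultFields.detection_boxes:
            np.array([[100., 100., 200., 200.]]),
        standard_fields.DetectionResultFields.detection_scores:
            np.array([.9]),
        standard_fields.DetectionResultFields.detection_classes:
            np.array([1])
    })
metrics = evaluator.evaluate()
print(metrics['DetectionBoxes_Precision/mAP'])
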
@@ -74,23 +74,30 @@ class CocoDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):
           [ymin, xmin, ymax, xmax] in absolute image coordinates.
         InputDataFields.groundtruth_classes: integer numpy array of shape
           [num_boxes] containing 1-indexed groundtruth classes for the boxes.
+        InputDataFields.groundtruth_is_crowd (optional): integer numpy array of
+          shape [num_boxes] containing iscrowd flag for groundtruth boxes.
     """
     if image_id in self._image_ids:
       tf.logging.warning('Ignoring ground truth with image id %s since it was '
                          'previously added', image_id)
       return

+    groundtruth_is_crowd = groundtruth_dict.get(
+        standard_fields.InputDataFields.groundtruth_is_crowd)
+    # Drop groundtruth_is_crowd if empty tensor.
+    if groundtruth_is_crowd is not None and not groundtruth_is_crowd.shape[0]:
+      groundtruth_is_crowd = None
+
     self._groundtruth_list.extend(
-        coco_tools.
-        ExportSingleImageGroundtruthToCoco(
+        coco_tools.ExportSingleImageGroundtruthToCoco(
             image_id=image_id,
             next_annotation_id=self._annotation_id,
             category_id_set=self._category_id_set,
-            groundtruth_boxes=groundtruth_dict[standard_fields.InputDataFields.
-                                               groundtruth_boxes],
-            groundtruth_classes=groundtruth_dict[standard_fields.
-                                                 InputDataFields.
-                                                 groundtruth_classes]))
+            groundtruth_boxes=groundtruth_dict[
+                standard_fields.InputDataFields.groundtruth_boxes],
+            groundtruth_classes=groundtruth_dict[
+                standard_fields.InputDataFields.groundtruth_classes],
+            groundtruth_is_crowd=groundtruth_is_crowd))
     self._annotation_id += groundtruth_dict[standard_fields.InputDataFields.
                                             groundtruth_boxes].shape[0]
     self._image_ids[image_id] = False
@@ -86,6 +86,78 @@ class CocoDetectionEvaluationTest(tf.test.TestCase):
     metrics = coco_evaluator.evaluate()
     self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)

+  def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self):
+    """Tests computing mAP with is_crowd GT boxes skipped."""
+    category_list = [{
+        'id': 0,
+        'name': 'person'
+    }, {
+        'id': 1,
+        'name': 'cat'
+    }, {
+        'id': 2,
+        'name': 'dog'
+    }]
+    coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
+    coco_evaluator.add_single_ground_truth_image_info(
+        image_id='image1',
+        groundtruth_dict={
+            standard_fields.InputDataFields.groundtruth_boxes:
+                np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]),
+            standard_fields.InputDataFields.groundtruth_classes:
+                np.array([1, 2]),
+            standard_fields.InputDataFields.groundtruth_is_crowd:
+                np.array([0, 1])
+        })
+    coco_evaluator.add_single_detected_image_info(
+        image_id='image1',
+        detections_dict={
+            standard_fields.DetectionResultFields.detection_boxes:
+                np.array([[100., 100., 200., 200.]]),
+            standard_fields.DetectionResultFields.detection_scores:
+                np.array([.8]),
+            standard_fields.DetectionResultFields.detection_classes:
+                np.array([1])
+        })
+    metrics = coco_evaluator.evaluate()
+    self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
+
+  def testGetOneMAPWithMatchingGroundtruthAndDetectionsEmptyCrowd(self):
+    """Tests computing mAP with empty is_crowd array passed in."""
+    category_list = [{
+        'id': 0,
+        'name': 'person'
+    }, {
+        'id': 1,
+        'name': 'cat'
+    }, {
+        'id': 2,
+        'name': 'dog'
+    }]
+    coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
+    coco_evaluator.add_single_ground_truth_image_info(
+        image_id='image1',
+        groundtruth_dict={
+            standard_fields.InputDataFields.groundtruth_boxes:
+                np.array([[100., 100., 200., 200.]]),
+            standard_fields.InputDataFields.groundtruth_classes:
+                np.array([1]),
+            standard_fields.InputDataFields.groundtruth_is_crowd:
+                np.array([])
+        })
+    coco_evaluator.add_single_detected_image_info(
+        image_id='image1',
+        detections_dict={
+            standard_fields.DetectionResultFields.detection_boxes:
+                np.array([[100., 100., 200., 200.]]),
+            standard_fields.DetectionResultFields.detection_scores:
+                np.array([.8]),
+            standard_fields.DetectionResultFields.detection_classes:
+                np.array([1])
+        })
+    metrics = coco_evaluator.evaluate()
+    self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
+
   def testRejectionOnDuplicateGroundtruth(self):
     """Tests that groundtruth cannot be added more than once for an image."""
     categories = [{'id': 1, 'name': 'cat'},
@@ -327,7 +327,8 @@ def ExportSingleImageGroundtruthToCoco(image_id,
                                        category_id_set,
                                        groundtruth_boxes,
                                        groundtruth_classes,
-                                       groundtruth_masks=None):
+                                       groundtruth_masks=None,
+                                       groundtruth_is_crowd=None):
   """Export groundtruth of a single image to COCO format.

   This function converts groundtruth detection annotations represented as numpy
@@ -338,8 +339,7 @@ def ExportSingleImageGroundtruthToCoco(image_id,
   groundtruth_classes[i] are associated with the same groundtruth annotation.

   In the exported result, "area" fields are always set to the area of the
-  groundtruth bounding box and "iscrowd" fields are always set to 0.
-  TODO(jonathanhuang): pass in "iscrowd" array for evaluating on COCO dataset.
+  groundtruth bounding box.

   Args:
     image_id: a unique image identifier either of type integer or string.
@@ -352,6 +352,8 @@ def ExportSingleImageGroundtruthToCoco(image_id,
     groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
     groundtruth_masks: optional uint8 numpy array of shape [num_detections,
       image_height, image_width] containing detection_masks.
+    groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes]
+      indicating whether groundtruth boxes are crowd.

   Returns:
     a list of groundtruth annotations for a single image in the COCO format.
@@ -379,17 +381,27 @@ def ExportSingleImageGroundtruthToCoco(image_id,
                      'Classes shape: %d. Boxes shape: %d. Image ID: %s' % (
                          groundtruth_classes.shape[0],
                          groundtruth_boxes.shape[0], image_id))
+  has_is_crowd = groundtruth_is_crowd is not None
+  if has_is_crowd and len(groundtruth_is_crowd.shape) != 1:
+    raise ValueError('groundtruth_is_crowd is expected to be of rank 1.')
   groundtruth_list = []
   for i in range(num_boxes):
     if groundtruth_classes[i] in category_id_set:
+      iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0
       export_dict = {
-          'id': next_annotation_id + i,
-          'image_id': image_id,
-          'category_id': int(groundtruth_classes[i]),
-          'bbox': list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),
-          'area': float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) *
-                        (groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1])),
-          'iscrowd': 0
+          'id':
+              next_annotation_id + i,
+          'image_id':
+              image_id,
+          'category_id':
+              int(groundtruth_classes[i]),
+          'bbox':
+              list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),
+          'area':
+              float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) *
+                    (groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1])),
+          'iscrowd':
+              iscrowd
       }
       if groundtruth_masks is not None:
         export_dict['segmentation'] = _RleCompress(groundtruth_masks[i])
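
For reference, a minimal sketch (not part of the commit) of calling the updated ExportSingleImageGroundtruthToCoco directly with the new groundtruth_is_crowd argument; the import path and the box, class, and id values are illustrative assumptions.

# Sketch only: direct call to the updated helper; values are assumptions.
import numpy as np
from object_detection.metrics import coco_tools

boxes = np.array([[0., 0., 100., 100.], [10., 10., 50., 50.]], dtype=np.float32)
classes = np.array([1, 2], dtype=np.int32)
is_crowd = np.array([0, 1], dtype=np.int32)

annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
    image_id='image1',
    next_annotation_id=1,
    category_id_set=set([1, 2]),
    groundtruth_boxes=boxes,  # [ymin, xmin, ymax, xmax], absolute coordinates
    groundtruth_classes=classes,
    groundtruth_is_crowd=is_crowd)

# With this change each annotation carries its per-box flag, e.g.
# annotations[0]['iscrowd'] == 0 and annotations[1]['iscrowd'] == 1,
# instead of the previous hard-coded 'iscrowd': 0.
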
@@ -248,7 +248,11 @@ class CocoToolsTest(tf.test.TestCase):
                       [0, 0, .5, .5],
                       [.5, .5, .5, .5]], dtype=np.float32)
     classes = np.array([1, 2, 3], dtype=np.int32)
+    is_crowd = np.array([0, 1, 0], dtype=np.int32)
     next_annotation_id = 1
+    expected_counts = ['04', '31', '4']
+
+    # Tests exporting without passing in is_crowd (for backward compatibility).
     coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
         image_id='first_image',
         category_id_set=set([1, 2, 3]),
@@ -256,7 +260,6 @@ class CocoToolsTest(tf.test.TestCase):
         groundtruth_boxes=boxes,
         groundtruth_classes=classes,
         groundtruth_masks=masks)
-    expected_counts = ['04', '31', '4']
     for i, annotation in enumerate(coco_annotations):
       self.assertEqual(annotation['segmentation']['counts'],
                        expected_counts[i])
@@ -267,6 +270,26 @@ class CocoToolsTest(tf.test.TestCase):
       self.assertEqual(annotation['category_id'], classes[i])
       self.assertEqual(annotation['id'], i + next_annotation_id)
+
+    # Tests exporting with is_crowd.
+    coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
+        image_id='first_image',
+        category_id_set=set([1, 2, 3]),
+        next_annotation_id=next_annotation_id,
+        groundtruth_boxes=boxes,
+        groundtruth_classes=classes,
+        groundtruth_masks=masks,
+        groundtruth_is_crowd=is_crowd)
+    for i, annotation in enumerate(coco_annotations):
+      self.assertEqual(annotation['segmentation']['counts'],
+                       expected_counts[i])
+      self.assertTrue(np.all(np.equal(mask.decode(
+          annotation['segmentation']), masks[i])))
+      self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
+      self.assertEqual(annotation['image_id'], 'first_image')
+      self.assertEqual(annotation['category_id'], classes[i])
+      self.assertEqual(annotation['iscrowd'], is_crowd[i])
+      self.assertEqual(annotation['id'], i + next_annotation_id)


 if __name__ == '__main__':
   tf.test.main()