Commit 2a553e51 authored by A. Unique TensorFlower

Internal change

PiperOrigin-RevId: 344869679
parent 836a0129
@@ -31,10 +31,11 @@ from __future__ import division
from __future__ import print_function
import atexit
import copy
import tempfile
import numpy as np
from absl import logging
import numpy as np
from pycocotools import cocoeval
import six
import tensorflow as tf
@@ -258,6 +259,278 @@ class COCOEvaluator(object):
          self._groundtruths[k].append(v)


class OlnXclassEvaluator(COCOEvaluator):
  """COCO evaluation metric class for the cross-class (seen vs. novel) setting."""

  def __init__(self, annotation_file, include_mask, need_rescale_bboxes=True,
               use_category=True, seen_class='all'):
    """Constructs COCO evaluation class.

    The class provides the interface to the metrics_fn in TPUEstimator. The
    _update_op() takes detections from each image and pushes them to
    self.detections. The _evaluate() loads a JSON file in COCO annotation
    format as the groundtruth and runs COCO evaluation.

    Args:
      annotation_file: a JSON file that stores annotations of the eval
        dataset. If `annotation_file` is None, groundtruth annotations will
        be loaded from the dataloader.
      include_mask: a boolean to indicate whether or not to include the mask
        eval.
      need_rescale_bboxes: if True, the bounding boxes in `predictions` will
        be rescaled back to absolute values (`image_info` is needed in this
        case).
      use_category: if `False`, treat all objects in all classes as a single
        foreground category.
      seen_class: 'all', 'voc', or 'nonvoc'; the class split treated as seen
        during training.
    """
    super(OlnXclassEvaluator, self).__init__(
        annotation_file=annotation_file,
        include_mask=include_mask,
        need_rescale_bboxes=need_rescale_bboxes)
    self._use_category = use_category
    self._seen_class = seen_class
    self._seen_class_ids = class_utils.coco_split_class_ids(seen_class)
    self._metric_names = [
        'AP', 'AP50', 'AP75',
        'APs', 'APm', 'APl',
        'ARmax10', 'ARmax20', 'ARmax50', 'ARmax100', 'ARmax200',
        'ARmax10s', 'ARmax10m', 'ARmax10l'
    ]
    if self._seen_class != 'all':
      self._metric_names.extend([
          'AP_seen', 'AP50_seen', 'AP75_seen',
          'APs_seen', 'APm_seen', 'APl_seen',
          'ARmax10_seen', 'ARmax20_seen', 'ARmax50_seen',
          'ARmax100_seen', 'ARmax200_seen',
          'ARmax10s_seen', 'ARmax10m_seen', 'ARmax10l_seen',
          'AP_novel', 'AP50_novel', 'AP75_novel',
          'APs_novel', 'APm_novel', 'APl_novel',
          'ARmax10_novel', 'ARmax20_novel', 'ARmax50_novel',
          'ARmax100_novel', 'ARmax200_novel',
          'ARmax10s_novel', 'ARmax10m_novel', 'ARmax10l_novel',
      ])

    if self._include_mask:
      mask_metric_names = ['mask_' + x for x in self._metric_names]
      self._metric_names.extend(mask_metric_names)
      self._required_prediction_fields.extend(['detection_masks'])
      self._required_groundtruth_fields.extend(['masks'])

    self.reset()

  def evaluate(self):
    """Evaluates with detections from all images with COCO API.

    Returns:
      metrics_dict: a dictionary mapping each name in `self._metric_names` to
        a float numpy scalar holding the corresponding COCO-style box (and,
        when `include_mask` is True, mask) metric.
    """
    if not self._annotation_file:
      logging.info('There is no annotation_file in COCOEvaluator.')
      gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
          self._groundtruths)
      coco_gt = coco_utils.COCOWrapper(
          eval_type=('mask' if self._include_mask else 'box'),
          gt_dataset=gt_dataset)
    else:
      logging.info('Using annotation file: %s', self._annotation_file)
      coco_gt = self._coco_gt
    coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
        self._predictions)
    coco_dt = coco_gt.loadRes(predictions=coco_predictions)
    image_ids = [ann['image_id'] for ann in coco_predictions]

    # Class manipulation: annotations in the 'all' split are all evaluated
    # (ignored_split = 0).
    for idx, _ in enumerate(coco_gt.dataset['annotations']):
      coco_gt.dataset['annotations'][idx]['ignored_split'] = 0
    coco_eval = cocoeval.OlnCOCOevalXclassWrapper(
        coco_gt, coco_dt, iou_type='bbox')
    coco_eval.params.maxDets = [10, 20, 50, 100, 200]
    coco_eval.params.imgIds = image_ids
    coco_eval.params.useCats = 0 if not self._use_category else 1
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    coco_metrics = coco_eval.stats

    if self._include_mask:
      mcoco_eval = cocoeval.OlnCOCOevalXclassWrapper(
          coco_gt, coco_dt, iou_type='segm')
      mcoco_eval.params.maxDets = [10, 20, 50, 100, 200]
      mcoco_eval.params.imgIds = image_ids
      mcoco_eval.params.useCats = 0 if not self._use_category else 1
      mcoco_eval.evaluate()
      mcoco_eval.accumulate()
      mcoco_eval.summarize()
      mask_coco_metrics = mcoco_eval.stats

    if self._include_mask:
      metrics = np.hstack((coco_metrics, mask_coco_metrics))
    else:
      metrics = coco_metrics
    if self._seen_class != 'all':
      # For the seen-class eval, annotations of novel classes are ignored.
      coco_gt_seen = copy.deepcopy(coco_gt)
      for idx, ann in enumerate(coco_gt.dataset['annotations']):
        if ann['category_id'] in self._seen_class_ids:
          coco_gt_seen.dataset['annotations'][idx]['ignored_split'] = 0
        else:
          coco_gt_seen.dataset['annotations'][idx]['ignored_split'] = 1
      coco_eval_seen = cocoeval.OlnCOCOevalXclassWrapper(
          coco_gt_seen, coco_dt, iou_type='bbox')
      coco_eval_seen.params.maxDets = [10, 20, 50, 100, 200]
      coco_eval_seen.params.imgIds = image_ids
      coco_eval_seen.params.useCats = 0 if not self._use_category else 1
      coco_eval_seen.evaluate()
      coco_eval_seen.accumulate()
      coco_eval_seen.summarize()
      coco_metrics_seen = coco_eval_seen.stats
      if self._include_mask:
        mcoco_eval_seen = cocoeval.OlnCOCOevalXclassWrapper(
            coco_gt_seen, coco_dt, iou_type='segm')
        mcoco_eval_seen.params.maxDets = [10, 20, 50, 100, 200]
        mcoco_eval_seen.params.imgIds = image_ids
        mcoco_eval_seen.params.useCats = 0 if not self._use_category else 1
        mcoco_eval_seen.evaluate()
        mcoco_eval_seen.accumulate()
        mcoco_eval_seen.summarize()
        mask_coco_metrics_seen = mcoco_eval_seen.stats

      # For the novel-class eval, annotations of seen classes are ignored.
      coco_gt_novel = copy.deepcopy(coco_gt)
      for idx, ann in enumerate(coco_gt.dataset['annotations']):
        if ann['category_id'] in self._seen_class_ids:
          coco_gt_novel.dataset['annotations'][idx]['ignored_split'] = 1
        else:
          coco_gt_novel.dataset['annotations'][idx]['ignored_split'] = 0
      coco_eval_novel = cocoeval.OlnCOCOevalXclassWrapper(
          coco_gt_novel, coco_dt, iou_type='bbox')
      coco_eval_novel.params.maxDets = [10, 20, 50, 100, 200]
      coco_eval_novel.params.imgIds = image_ids
      coco_eval_novel.params.useCats = 0 if not self._use_category else 1
      coco_eval_novel.evaluate()
      coco_eval_novel.accumulate()
      coco_eval_novel.summarize()
      coco_metrics_novel = coco_eval_novel.stats
      if self._include_mask:
        mcoco_eval_novel = cocoeval.OlnCOCOevalXclassWrapper(
            coco_gt_novel, coco_dt, iou_type='segm')
        mcoco_eval_novel.params.maxDets = [10, 20, 50, 100, 200]
        mcoco_eval_novel.params.imgIds = image_ids
        mcoco_eval_novel.params.useCats = 0 if not self._use_category else 1
        mcoco_eval_novel.evaluate()
        mcoco_eval_novel.accumulate()
        mcoco_eval_novel.summarize()
        mask_coco_metrics_novel = mcoco_eval_novel.stats

      # Combine all splits.
      if self._include_mask:
        metrics = np.hstack((
            coco_metrics, coco_metrics_seen, coco_metrics_novel,
            mask_coco_metrics, mask_coco_metrics_seen,
            mask_coco_metrics_novel))
      else:
        metrics = np.hstack((
            coco_metrics, coco_metrics_seen, coco_metrics_novel))

    # Clean up the internal variables so the next eval starts fresh.
    self.reset()

    metrics_dict = {}
    for i, name in enumerate(self._metric_names):
      metrics_dict[name] = metrics[i].astype(np.float32)
    return metrics_dict
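
Reviewer note, not part of the diff: a minimal usage sketch for the class added above. The annotation path and the `detection_outputs` iterable are placeholders, and the prediction dicts must carry the fields listed in `_required_prediction_fields`; treat this as illustrative, not as the repo's actual driver code.

```python
# Hypothetical driver for OlnXclassEvaluator; paths and data are placeholders.
evaluator = coco_evaluator.OlnXclassEvaluator(
    annotation_file='instances_val2017.json',  # placeholder annotation file
    include_mask=False,
    use_category=False,
    seen_class='voc')  # VOC classes count as 'seen'; the rest as 'novel'

for predictions in detection_outputs:  # placeholder iterable of model outputs
  # Each dict needs e.g. 'detection_boxes', 'detection_scores',
  # 'detection_classes', 'num_detections' (and 'image_info' when
  # need_rescale_bboxes=True), batched per image.
  evaluator.update(predictions)

metrics = evaluator.evaluate()
print(metrics['AP'], metrics['ARmax100_novel'])  # np.float32 scalars
```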

class OlnXdataEvaluator(OlnXclassEvaluator):
  """COCO evaluation metric class for the cross-dataset setting."""

  def __init__(self, annotation_file, include_mask, need_rescale_bboxes=True,
               use_category=True, seen_class='all'):
    """Constructs COCO evaluation class.

    The class provides the interface to the metrics_fn in TPUEstimator. The
    _update_op() takes detections from each image and pushes them to
    self.detections. The _evaluate() loads a JSON file in COCO annotation
    format as the groundtruth and runs COCO evaluation.

    Args:
      annotation_file: a JSON file that stores annotations of the eval
        dataset. If `annotation_file` is None, groundtruth annotations will
        be loaded from the dataloader.
      include_mask: a boolean to indicate whether or not to include the mask
        eval.
      need_rescale_bboxes: if True, the bounding boxes in `predictions` will
        be rescaled back to absolute values (`image_info` is needed in this
        case).
      use_category: accepted for interface compatibility; the super call
        below always passes `False`, so cross-dataset eval is class-agnostic.
      seen_class: accepted for interface compatibility; the super call below
        always passes 'all', so no seen/novel split is evaluated.
    """
    super(OlnXdataEvaluator, self).__init__(
        annotation_file=annotation_file,
        include_mask=include_mask,
        need_rescale_bboxes=need_rescale_bboxes,
        use_category=False,
        seen_class='all')

  def evaluate(self):
    """Evaluates with detections from all images with COCO API.

    Returns:
      metrics_dict: a dictionary mapping each name in `self._metric_names` to
        a float numpy scalar holding the corresponding COCO-style box (and,
        when `include_mask` is True, mask) metric.
    """
    if not self._annotation_file:
      logging.info('There is no annotation_file in COCOEvaluator.')
      gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
          self._groundtruths)
      coco_gt = coco_utils.COCOWrapper(
          eval_type=('mask' if self._include_mask else 'box'),
          gt_dataset=gt_dataset)
    else:
      logging.info('Using annotation file: %s', self._annotation_file)
      coco_gt = self._coco_gt
    coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
        self._predictions)
    coco_dt = coco_gt.loadRes(predictions=coco_predictions)
    image_ids = [ann['image_id'] for ann in coco_predictions]

    # Class manipulation: annotations in the 'all' split are all evaluated
    # (ignored_split = 0).
    for idx, _ in enumerate(coco_gt.dataset['annotations']):
      coco_gt.dataset['annotations'][idx]['ignored_split'] = 0

    coco_eval = cocoeval.OlnCOCOevalWrapper(coco_gt, coco_dt, iou_type='bbox')
    coco_eval.params.maxDets = [10, 20, 50, 100, 200]
    coco_eval.params.imgIds = image_ids
    coco_eval.params.useCats = 0 if not self._use_category else 1
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    coco_metrics = coco_eval.stats

    if self._include_mask:
      mcoco_eval = cocoeval.OlnCOCOevalWrapper(coco_gt, coco_dt,
                                               iou_type='segm')
      mcoco_eval.params.maxDets = [10, 20, 50, 100, 200]
      mcoco_eval.params.imgIds = image_ids
      mcoco_eval.params.useCats = 0 if not self._use_category else 1
      mcoco_eval.evaluate()
      mcoco_eval.accumulate()
      mcoco_eval.summarize()
      mask_coco_metrics = mcoco_eval.stats

    if self._include_mask:
      metrics = np.hstack((coco_metrics, mask_coco_metrics))
    else:
      metrics = coco_metrics

    # Clean up the internal variables so the next eval starts fresh.
    self.reset()

    metrics_dict = {}
    for i, name in enumerate(self._metric_names):
      metrics_dict[name] = metrics[i].astype(np.float32)
    return metrics_dict
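
Design note for reviewers: relative to `OlnXclassEvaluator`, this subclass pins `use_category=False` and `seen_class='all'` in the super call and evaluates with `OlnCOCOevalWrapper` rather than the Xclass wrapper, so no seen/novel columns are produced. A quick hypothetical check, using the same placeholder annotation path as above:

```python
# Hypothetical: cross-dataset eval is class-agnostic with a single split,
# so only the 14 base metric names (x2 with masks) are registered.
xdata = coco_evaluator.OlnXdataEvaluator(
    annotation_file='instances_val2017.json', include_mask=False)
assert len(xdata._metric_names) == 14
assert 'AP_novel' not in xdata._metric_names
```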

class ShapeMaskCOCOEvaluator(COCOEvaluator):
  """COCO evaluation metric class for ShapeMask."""
...
@@ -29,6 +29,18 @@ def evaluator_generator(params):
  elif params.type == 'box_and_mask':
    evaluator = coco_evaluator.COCOEvaluator(
        annotation_file=params.val_json_file, include_mask=True)
  elif params.type == 'oln_xclass_box':
    evaluator = coco_evaluator.OlnXclassEvaluator(
        annotation_file=params.val_json_file, include_mask=False,
        use_category=False, seen_class=params.seen_class)
  elif params.type == 'oln_xclass_box_and_mask':
    evaluator = coco_evaluator.OlnXclassEvaluator(
        annotation_file=params.val_json_file, include_mask=True,
        use_category=False, seen_class=params.seen_class)
  elif params.type == 'oln_xdata_box':
    evaluator = coco_evaluator.OlnXdataEvaluator(
        annotation_file=params.val_json_file, include_mask=False,
        use_category=False, seen_class='all')
  elif params.type == 'shapemask_box_and_mask':
    evaluator = coco_evaluator.ShapeMaskCOCOEvaluator(
        mask_eval_class=params.mask_eval_class,
...
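
For context, wiring the new types through `evaluator_generator` might look like the following. The `params` object here is a `SimpleNamespace` stand-in for the repo's config params, so the construction (though not the field names read above) is illustrative only:

```python
from types import SimpleNamespace

# Hypothetical params stand-in; only the fields read by evaluator_generator
# for the 'oln_xclass_box' branch are populated.
params = SimpleNamespace(
    type='oln_xclass_box',
    val_json_file='instances_val2017.json',  # placeholder path
    seen_class='voc')
evaluator = evaluator_generator(params)  # -> OlnXclassEvaluator
```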