Commit 221a6571 authored by mibaumgartner's avatar mibaumgartner
Browse files

FROC and CaseEval docs

parent 880d40a1
...@@ -23,12 +23,14 @@ from sklearn.metrics import accuracy_score, average_precision_score, confusion_m ...@@ -23,12 +23,14 @@ from sklearn.metrics import accuracy_score, average_precision_score, confusion_m
f1_score, precision_score, recall_score, roc_auc_score f1_score, precision_score, recall_score, roc_auc_score
from nndet.evaluator import AbstractEvaluator from nndet.evaluator import AbstractEvaluator
from nndet.utils.info import experimental
__all__ = ["CaseEvaluator"] __all__ = ["CaseEvaluator"]
class _CaseEvaluator(AbstractEvaluator): class _CaseEvaluator(AbstractEvaluator):
@experimental
def __init__(self, def __init__(self,
classes: Sequence[Union[str, int]], classes: Sequence[Union[str, int]],
score_metrics_scalar: Mapping[str, Callable] = None, score_metrics_scalar: Mapping[str, Callable] = None,
...@@ -44,6 +46,9 @@ class _CaseEvaluator(AbstractEvaluator): ...@@ -44,6 +46,9 @@ class _CaseEvaluator(AbstractEvaluator):
is computed by an argmax over those scores. The mappings of the is computed by an argmax over those scores. The mappings of the
metrics are later used as the keys of the result dict. metrics are later used as the keys of the result dict.
Note: this implementation is experimental and might change in the
future.
Args: Args:
classes: class present in whole dataset classes: class present in whole dataset
score_metrics_scalar: metrics which accept ground truth classes [N] score_metrics_scalar: metrics which accept ground truth classes [N]
......
...@@ -28,8 +28,11 @@ from nndet.evaluator import DetectionMetric ...@@ -28,8 +28,11 @@ from nndet.evaluator import DetectionMetric
from sklearn.metrics import roc_curve from sklearn.metrics import roc_curve
from collections import defaultdict from collections import defaultdict
from nndet.utils.info import experimental
class FROCMetric(DetectionMetric): class FROCMetric(DetectionMetric):
@experimental
def __init__(self, def __init__(self,
classes: Sequence[str], classes: Sequence[str],
iou_thresholds: Sequence[float] = (0.1, 0.5), iou_thresholds: Sequence[float] = (0.1, 0.5),
...@@ -39,6 +42,13 @@ class FROCMetric(DetectionMetric): ...@@ -39,6 +42,13 @@ class FROCMetric(DetectionMetric):
): ):
""" """
Class to compute FROC Class to compute FROC
Multiclass FROC: this implementation computes the FROC over all
objects regardless of their class, which assigns each object the
same "weight".
Note: this implementation is experimental and might change in the
future. Please prefer the AP metric for now.
Args: Args:
classes: name of each class classes: name of each class
...@@ -259,7 +269,7 @@ class FROCMetric(DetectionMetric): ...@@ -259,7 +269,7 @@ class FROCMetric(DetectionMetric):
froc_curves_cls = {} froc_curves_cls = {}
for cls_idx, cls_str in enumerate(self.classes): for cls_idx, cls_str in enumerate(self.classes):
# filter current class from list of results and put them into a dict with a single entry # filter current class from list of results and put them into a dict with a single entry
results_by_cls = [{0: r[cls_idx]} for r in results_list if cls_idx in r if cls_idx in r] results_by_cls = [{0: r[cls_idx]} if cls_idx in r else {} for r in results_list]
if results_by_cls: if results_by_cls:
cls_scores, cls_curves = self.compute_froc_mul_iou(results_by_cls) cls_scores, cls_curves = self.compute_froc_mul_iou(results_by_cls)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment