"...composable_kernel-1.git" did not exist on "73ca970155a96952a3bf245511b8412521cad4ff"
Commit d4f401e1 authored by Gunho Park's avatar Gunho Park
Browse files

Merge evaluation metrics to metrics.py

parent 2b676a9b
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This source code is a modified version of
https://github.com/xuebinqin/Binary-Segmentation-Evaluation-Tool
"""
# Import libraries
import numpy as np
class MAE(object):
  """Mean Absolute Error (MAE) metric for BASNet.

  Accumulates (groundtruth, prediction) mask pairs via `update_state` and
  reports the mean of the per-image MAE values over all accumulated pairs.
  """

  def __init__(self):
    """Constructs MAE metric class."""
    self.reset_states()

  @property
  def name(self):
    return 'MAE'

  def reset_states(self):
    """Resets internal states for a fresh run."""
    self._predictions = []
    self._groundtruths = []

  def result(self):
    """Evaluates segmentation results, and reset_states."""
    metric_result = self.evaluate()
    # Cleans up the internal variables in order for a fresh eval next time.
    self.reset_states()
    return metric_result

  def evaluate(self):
    """Evaluates with masks from all images.

    Returns:
      average_mae: average MAE as a float32 numpy scalar.

    Raises:
      ValueError: if `update_state` was never called before evaluation.
    """
    # Fail loudly instead of raising a bare ZeroDivisionError below.
    if not self._groundtruths:
      raise ValueError('MAE.evaluate() called before any update_state().')
    mae_total = 0.0
    for true, pred in zip(self._groundtruths, self._predictions):
      mae_total += self._compute_mae(true, pred)
    average_mae = mae_total / len(self._groundtruths)
    return np.float32(average_mae)

  def _mask_normalize(self, mask):
    # Rescale into [0, 1] by the mask's peak; epsilon guards all-zero masks.
    return mask / (np.amax(mask) + 1e-8)

  def _compute_mae(self, true, pred):
    """Computes MAE between one groundtruth mask and one predicted mask."""
    h, w = true.shape[0], true.shape[1]
    mask1 = self._mask_normalize(true)
    mask2 = self._mask_normalize(pred)
    sum_error = np.sum(np.absolute(mask1.astype(float) - mask2.astype(float)))
    # Epsilon keeps the division finite for degenerate zero-sized masks.
    mae_error = sum_error / (float(h) * float(w) + 1e-8)
    return mae_error

  def _convert_to_numpy(self, groundtruths, predictions):
    """Converts tensors to numpy arrays."""
    numpy_groundtruths = groundtruths.numpy()
    numpy_predictions = predictions.numpy()
    return numpy_groundtruths, numpy_predictions

  def update_state(self, groundtruths, predictions):
    """Updates segmentation results and groundtruth data.

    Args:
      groundtruths : Tuple of single Tensor [batch, width, height, 1],
        groundtruth masks. range [0, 1]
      predictions : Tuple of single Tensor [batch, width, height, 1],
        predicted masks. range [0, 1]
    """
    groundtruths, predictions = self._convert_to_numpy(groundtruths[0],
                                                       predictions[0])
    for true, pred in zip(groundtruths, predictions):
      self._groundtruths.append(true)
      self._predictions.append(pred)
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This source code is a modified version of
https://github.com/xuebinqin/Binary-Segmentation-Evaluation-Tool
"""
# Import libraries
import numpy as np
class maxFscore(object):
  """Maximum F-score metric for BASNet.

  Accumulates (groundtruth, prediction) mask pairs via `update_state` and
  reports the maximum F-beta score over the 255 binarization thresholds.
  """

  def __init__(self):
    """Constructs BASNet evaluation class."""
    self.reset_states()

  @property
  def name(self):
    return 'maxF'

  def reset_states(self):
    """Resets internal states for a fresh run."""
    self._predictions = []
    self._groundtruths = []

  def result(self):
    """Evaluates segmentation results, and reset_states."""
    metric_result = self.evaluate()
    # Cleans up the internal variables in order for a fresh eval next time.
    self.reset_states()
    return metric_result

  def evaluate(self):
    """Evaluates with masks from all images.

    Returns:
      f_max: maximum F-score value as a float32 numpy scalar.
    """
    mybins = np.arange(0, 256)
    # In the saliency literature this constant plays the role of beta^2.
    beta = 0.3
    precisions = np.zeros((len(self._groundtruths), len(mybins) - 1))
    recalls = np.zeros((len(self._groundtruths), len(mybins) - 1))
    for i, (true, pred) in enumerate(zip(self._groundtruths,
                                         self._predictions)):
      # Rescale both masks to [0, 255] before histogram thresholding.
      true = self._mask_normalize(true) * 255.0
      pred = self._mask_normalize(pred) * 255.0
      pre, rec = self._compute_pre_rec(true, pred, mybins=mybins)
      precisions[i, :] = pre
      recalls[i, :] = rec
    precisions = np.sum(precisions, 0) / (len(self._groundtruths) + 1e-8)
    recalls = np.sum(recalls, 0) / (len(self._groundtruths) + 1e-8)
    f = (1 + beta) * precisions * recalls / (beta * precisions + recalls + 1e-8)
    f_max = np.max(f)
    return np.float32(f_max)

  def _mask_normalize(self, mask):
    # Rescale into [0, 1] by the mask's peak; epsilon guards all-zero masks.
    return mask / (np.amax(mask) + 1e-8)

  def _compute_pre_rec(self, true, pred, mybins=None):
    """Computes precision/recall over binarization thresholds.

    Args:
      true: groundtruth mask with values in [0, 255].
      pred: predicted mask with values in [0, 255].
      mybins: histogram bin edges; defaults to np.arange(0, 256).

    Returns:
      Tuple of (precision, recall) 1-D arrays, one entry per threshold,
      ordered from the highest threshold to the lowest.
    """
    # Avoid a mutable (ndarray) default argument; build the bins per call.
    if mybins is None:
      mybins = np.arange(0, 256)
    # Pixel count of groundtruth foreground regions (TP + FN).
    gt_num = true[true > 128].size
    # Predicted pixel values inside the groundtruth foreground region.
    pp = pred[true > 128]
    # Predicted pixel values inside the groundtruth background region.
    nn = pred[true <= 128]
    pp_hist = np.histogram(pp, bins=mybins)[0]
    nn_hist = np.histogram(nn, bins=mybins)[0]
    # Flipping then cumsum yields, per descending threshold t, the count of
    # pixels with value >= t, i.e. the TP and FP totals at that threshold.
    pp_hist_flip_cum = np.cumsum(np.flipud(pp_hist))
    nn_hist_flip_cum = np.cumsum(np.flipud(nn_hist))
    precision = pp_hist_flip_cum / (pp_hist_flip_cum + nn_hist_flip_cum + 1e-8)  # TP/(TP+FP)
    recall = pp_hist_flip_cum / (gt_num + 1e-8)  # TP/(TP+FN)
    precision[np.isnan(precision)] = 0.0
    recall[np.isnan(recall)] = 0.0
    return precision, recall

  def _convert_to_numpy(self, groundtruths, predictions):
    """Converts tensors to numpy arrays."""
    numpy_groundtruths = groundtruths.numpy()
    numpy_predictions = predictions.numpy()
    return numpy_groundtruths, numpy_predictions

  def update_state(self, groundtruths, predictions):
    """Updates segmentation results and groundtruth data.

    Args:
      groundtruths : Tuple of single Tensor [batch, width, height, 1],
        groundtruth masks. range [0, 1]
      predictions : Tuple of single Tensor [batch, width, height, 1],
        predicted masks. range [0, 1]
    """
    groundtruths, predictions = self._convert_to_numpy(groundtruths[0],
                                                       predictions[0])
    for true, pred in zip(groundtruths, predictions):
      self._groundtruths.append(true)
      self._predictions.append(pred)
...@@ -12,12 +12,204 @@ ...@@ -12,12 +12,204 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# ============================================================================== # ==============================================================================
"""
The MAE and maxFscore implementations are a modified version of
https://github.com/xuebinqin/Binary-Segmentation-Evaluation-Tool
"""
# Import libraries # Import libraries
import numpy as np import numpy as np
from scipy import signal from scipy import signal
class MAE(object):
  """Mean Absolute Error(MAE) metric for basnet."""

  def __init__(self):
    """Constructs MAE metric class."""
    self.reset_states()

  @property
  def name(self):
    return 'MAE'

  def reset_states(self):
    """Clears every accumulated mask so the next run starts fresh."""
    self._predictions = []
    self._groundtruths = []

  def result(self):
    """Returns the metric over accumulated masks, then clears state."""
    value = self.evaluate()
    # Drop the stored masks so a subsequent eval starts from scratch.
    self.reset_states()
    return value

  def evaluate(self):
    """Averages the per-image MAE over every accumulated mask pair.

    Returns:
      average_mae: average MAE with float numpy.
    """
    per_image = [
        self._compute_mae(gt, pr)
        for gt, pr in zip(self._groundtruths, self._predictions)
    ]
    mean_error = sum(per_image) / len(self._groundtruths)
    return mean_error.astype(np.float32)

  def _mask_normalize(self, mask):
    # Rescale by the mask's peak value; epsilon avoids dividing by zero.
    peak = np.amax(mask)
    return mask / (peak + 1e-8)

  def _compute_mae(self, true, pred):
    """Mean absolute difference between two peak-normalized masks."""
    height, width = true.shape[0], true.shape[1]
    gt_norm = self._mask_normalize(true).astype(float)
    pred_norm = self._mask_normalize(pred).astype(float)
    abs_diff_total = np.sum(np.absolute(gt_norm - pred_norm))
    return abs_diff_total / (float(height) * float(width) + 1e-8)

  def _convert_to_numpy(self, groundtruths, predictions):
    """Converts tensors to numpy arrays."""
    return groundtruths.numpy(), predictions.numpy()

  def update_state(self, groundtruths, predictions):
    """Accumulates a batch of groundtruth and predicted masks.

    Args:
      groundtruths : Tuple of single Tensor [batch, width, height, 1],
        groundtruth masks. range [0, 1]
      predictions : Tuple of single Tensor [batch, width, height, 1],
        predicted masks. range [0, 1]
    """
    gt_batch, pred_batch = self._convert_to_numpy(groundtruths[0],
                                                  predictions[0])
    self._groundtruths.extend(gt_batch)
    self._predictions.extend(pred_batch)
class maxFscore(object):
  """Maximum F-score metric for basnet."""

  def __init__(self):
    """Constructs BASNet evaluation class."""
    self.reset_states()

  @property
  def name(self):
    return 'maxF'

  def reset_states(self):
    """Clears every accumulated mask so the next run starts fresh."""
    self._predictions = []
    self._groundtruths = []

  def result(self):
    """Returns the metric over accumulated masks, then clears state."""
    value = self.evaluate()
    # Drop the stored masks so a subsequent eval starts from scratch.
    self.reset_states()
    return value

  def evaluate(self):
    """Computes the maximum F-score across all binarization thresholds.

    Returns:
      f_max: maximum F-score value.
    """
    bins = np.arange(0, 256)
    beta = 0.3
    num_images = len(self._groundtruths)
    precision_rows = []
    recall_rows = []
    for gt, pr in zip(self._groundtruths, self._predictions):
      # Rescale each mask to [0, 255] before histogram thresholding.
      scaled_gt = self._mask_normalize(gt) * 255.0
      scaled_pr = self._mask_normalize(pr) * 255.0
      pre, rec = self._compute_pre_rec(scaled_gt, scaled_pr, mybins=bins)
      precision_rows.append(pre)
      recall_rows.append(rec)
    mean_precision = np.sum(precision_rows, 0) / (num_images + 1e-8)
    mean_recall = np.sum(recall_rows, 0) / (num_images + 1e-8)
    f_scores = ((1 + beta) * mean_precision * mean_recall /
                (beta * mean_precision + mean_recall + 1e-8))
    return np.max(f_scores).astype(np.float32)

  def _mask_normalize(self, mask):
    # Rescale by the mask's peak value; epsilon avoids dividing by zero.
    peak = np.amax(mask)
    return mask / (peak + 1e-8)

  def _compute_pre_rec(self, true, pred, mybins=np.arange(0,256)):
    """Precision/recall per threshold, ordered highest threshold first."""
    # Predicted values split by groundtruth foreground (>128) / background.
    fg_vals = pred[true > 128]
    bg_vals = pred[true <= 128]
    # Number of groundtruth foreground pixels (TP + FN).
    num_fg = true[true > 128].size
    fg_hist = np.histogram(fg_vals, bins=mybins)[0]
    bg_hist = np.histogram(bg_vals, bins=mybins)[0]
    # Reversed cumulative counts give TP / FP totals at each threshold.
    tp = np.cumsum(fg_hist[::-1])
    fp = np.cumsum(bg_hist[::-1])
    precision = tp / (tp + fp + 1e-8)  # TP/(TP+FP)
    recall = tp / (num_fg + 1e-8)  # TP/(TP+FN)
    precision[np.isnan(precision)] = 0.0
    recall[np.isnan(recall)] = 0.0
    return precision, recall

  def _convert_to_numpy(self, groundtruths, predictions):
    """Converts tensors to numpy arrays."""
    return groundtruths.numpy(), predictions.numpy()

  def update_state(self, groundtruths, predictions):
    """Accumulates a batch of groundtruth and predicted masks.

    Args:
      groundtruths : Tuple of single Tensor [batch, width, height, 1],
        groundtruth masks. range [0, 1]
      predictions : Tuple of single Tensor [batch, width, height, 1],
        predicted masks. range [0, 1]
    """
    gt_batch, pred_batch = self._convert_to_numpy(groundtruths[0],
                                                  predictions[0])
    self._groundtruths.extend(gt_batch)
    self._predictions.extend(pred_batch)
class relaxedFscore(object): class relaxedFscore(object):
"""Relaxed F-score metric for basnet.""" """Relaxed F-score metric for basnet."""
......
...@@ -21,8 +21,7 @@ from absl.testing import parameterized ...@@ -21,8 +21,7 @@ from absl.testing import parameterized
import numpy as np import numpy as np
import tensorflow as tf import tensorflow as tf
from official.vision.beta.projects.basnet.evaluation import mae from official.vision.beta.projects.basnet.evaluation import metrics
from official.vision.beta.projects.basnet.evaluation import max_f
class BASNetMetricTest(parameterized.TestCase, tf.test.TestCase): class BASNetMetricTest(parameterized.TestCase, tf.test.TestCase):
...@@ -33,7 +32,7 @@ class BASNetMetricTest(parameterized.TestCase, tf.test.TestCase): ...@@ -33,7 +32,7 @@ class BASNetMetricTest(parameterized.TestCase, tf.test.TestCase):
inputs = (tf.random.uniform([2, input_size, input_size, 1]),) inputs = (tf.random.uniform([2, input_size, input_size, 1]),)
labels = (tf.random.uniform([2, input_size, input_size, 1]),) labels = (tf.random.uniform([2, input_size, input_size, 1]),)
mae_obj = mae.MAE() mae_obj = metrics.MAE()
mae_obj.reset_states() mae_obj.reset_states()
mae_obj.update_state(labels, inputs) mae_obj.update_state(labels, inputs)
output = mae_obj.result() output = mae_obj.result()
...@@ -52,7 +51,7 @@ class BASNetMetricTest(parameterized.TestCase, tf.test.TestCase): ...@@ -52,7 +51,7 @@ class BASNetMetricTest(parameterized.TestCase, tf.test.TestCase):
inputs = (tf.random.uniform([2, input_size, input_size, 1]),) inputs = (tf.random.uniform([2, input_size, input_size, 1]),)
labels = (tf.random.uniform([2, input_size, input_size, 1]),) labels = (tf.random.uniform([2, input_size, input_size, 1]),)
max_f_obj = max_f.maxFscore() max_f_obj = metrics.maxFscore()
max_f_obj.reset_states() max_f_obj.reset_states()
max_f_obj.update_state(labels, inputs) max_f_obj.update_state(labels, inputs)
output = max_f_obj.result() output = max_f_obj.result()
......
...@@ -25,9 +25,7 @@ from official.core import input_reader ...@@ -25,9 +25,7 @@ from official.core import input_reader
from official.core import task_factory from official.core import task_factory
from official.vision.beta.projects.basnet.configs import basnet as exp_cfg from official.vision.beta.projects.basnet.configs import basnet as exp_cfg
from official.vision.beta.dataloaders import segmentation_input from official.vision.beta.dataloaders import segmentation_input
from official.vision.beta.projects.basnet.evaluation import max_f from official.vision.beta.projects.basnet.evaluation import metrics
from official.vision.beta.projects.basnet.evaluation import relax_f
from official.vision.beta.projects.basnet.evaluation import mae
from official.vision.beta.projects.basnet.losses import basnet_losses from official.vision.beta.projects.basnet.losses import basnet_losses
from official.vision.beta.projects.basnet.modeling import basnet_model from official.vision.beta.projects.basnet.modeling import basnet_model
...@@ -155,16 +153,16 @@ class BASNetTask(base_task.Task): ...@@ -155,16 +153,16 @@ class BASNetTask(base_task.Task):
def build_metrics(self, training=False): def build_metrics(self, training=False):
"""Gets streaming metrics for training/validation.""" """Gets streaming metrics for training/validation."""
metrics = [] evaluations = []
if training: if training:
metrics = [] evaluations = []
else: else:
self.mae_metric = mae.MAE() self.mae_metric = metrics.MAE()
self.maxf_metric = max_f.maxFscore() self.maxf_metric = metrics.maxFscore()
self.relaxf_metric = relax_f.relaxedFscore() self.relaxf_metric = metrics.relaxedFscore()
return metrics return evaluations
def train_step(self, inputs, model, optimizer, metrics=None): def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward. """Does forward and backward.
......
...@@ -30,12 +30,6 @@ from official.core import train_lib ...@@ -30,12 +30,6 @@ from official.core import train_lib
from official.core import train_utils from official.core import train_utils
from official.modeling import performance from official.modeling import performance
#import os
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1, 2, 3"
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment