ModelZoo / ResNet50_tensorflow · Commits

Commit d4f401e1, authored Jun 16, 2021 by Gunho Park
Parent: 2b676a9b

Merge evaluation metrics to metrics.py
Showing 6 changed files with 338 additions and 251 deletions (+338 −251):
- official/vision/beta/projects/basnet/evaluation/mae.py (+0 −101)
- official/vision/beta/projects/basnet/evaluation/max_f.py (+0 −131)
- official/vision/beta/projects/basnet/evaluation/metrics.py (+328 −0)
- official/vision/beta/projects/basnet/evaluation/metrics_test.py (+3 −4)
- official/vision/beta/projects/basnet/tasks/basnet.py (+7 −9)
- official/vision/beta/projects/basnet/train.py (+0 −6)
official/vision/beta/projects/basnet/evaluation/mae.py (deleted, 100644 → 0)
```python
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This source code is a modified version of
https://github.com/xuebinqin/Binary-Segmentation-Evaluation-Tool
"""

# Import libraries
import numpy as np


class MAE(object):
  """Mean Absolute Error (MAE) metric for BASNet."""

  def __init__(self):
    """Constructs the MAE metric class."""
    self.reset_states()

  @property
  def name(self):
    return 'MAE'

  def reset_states(self):
    """Resets internal states for a fresh run."""
    self._predictions = []
    self._groundtruths = []

  def result(self):
    """Evaluates segmentation results, then resets states."""
    metric_result = self.evaluate()
    # Cleans up the internal variables for a fresh eval next time.
    self.reset_states()
    return metric_result

  def evaluate(self):
    """Evaluates with masks from all images.

    Returns:
      average_mae: average MAE as a float numpy scalar.
    """
    mae_total = 0.0
    for true, pred in zip(self._groundtruths, self._predictions):
      # Compute MAE for one image pair.
      mae = self._compute_mae(true, pred)
      mae_total += mae
    average_mae = mae_total / len(self._groundtruths)
    average_mae = average_mae.astype(np.float32)
    return average_mae

  def _mask_normalize(self, mask):
    return mask / (np.amax(mask) + 1e-8)

  def _compute_mae(self, true, pred):
    h, w = true.shape[0], true.shape[1]
    mask1 = self._mask_normalize(true)
    mask2 = self._mask_normalize(pred)
    sum_error = np.sum(np.absolute(mask1.astype(float) - mask2.astype(float)))
    mae_error = sum_error / (float(h) * float(w) + 1e-8)
    return mae_error

  def _convert_to_numpy(self, groundtruths, predictions):
    """Converts tensors to numpy arrays."""
    numpy_groundtruths = groundtruths.numpy()
    numpy_predictions = predictions.numpy()
    return numpy_groundtruths, numpy_predictions

  def update_state(self, groundtruths, predictions):
    """Updates segmentation results and groundtruth data.

    Args:
      groundtruths: Tuple of a single Tensor [batch, width, height, 1],
        groundtruth masks in range [0, 1].
      predictions: Tuple of a single Tensor [batch, width, height, 1],
        predicted masks in range [0, 1].
    """
    groundtruths, predictions = self._convert_to_numpy(
        groundtruths[0], predictions[0])
    for true, pred in zip(groundtruths, predictions):
      self._groundtruths.append(true)
      self._predictions.append(pred)
```
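For orientation, here is a minimal usage sketch of this metric class (not part of the commit; it mirrors the pattern used in metrics_test.py below, and the batch size and mask shape are illustrative):

```python
# Hypothetical usage of the MAE class above; shapes and values are illustrative.
import tensorflow as tf

mae_obj = MAE()
labels = (tf.random.uniform([2, 64, 64, 1]),)   # groundtruth masks in [0, 1]
outputs = (tf.random.uniform([2, 64, 64, 1]),)  # predicted masks in [0, 1]

mae_obj.update_state(labels, outputs)
print(mae_obj.result())  # average MAE over the batch, as np.float32
```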
official/vision/beta/projects/basnet/evaluation/max_f.py (deleted, 100644 → 0)
```python
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This source code is a modified version of
https://github.com/xuebinqin/Binary-Segmentation-Evaluation-Tool
"""

# Import libraries
import numpy as np


class maxFscore(object):
  """Maximum F-score metric for BASNet."""

  def __init__(self):
    """Constructs BASNet evaluation class."""
    self.reset_states()

  @property
  def name(self):
    return 'maxF'

  def reset_states(self):
    """Resets internal states for a fresh run."""
    self._predictions = []
    self._groundtruths = []

  def result(self):
    """Evaluates segmentation results, then resets states."""
    metric_result = self.evaluate()
    # Cleans up the internal variables for a fresh eval next time.
    self.reset_states()
    return metric_result

  def evaluate(self):
    """Evaluates with masks from all images.

    Returns:
      f_max: maximum F-score value over all thresholds.
    """
    mybins = np.arange(0, 256)
    beta = 0.3
    precisions = np.zeros((len(self._groundtruths), len(mybins) - 1))
    recalls = np.zeros((len(self._groundtruths), len(mybins) - 1))
    for i, (true, pred) in enumerate(
        zip(self._groundtruths, self._predictions)):
      # Rescale masks to [0, 255] and compute per-threshold precision/recall.
      true = self._mask_normalize(true) * 255.0
      pred = self._mask_normalize(pred) * 255.0
      pre, rec = self._compute_pre_rec(true, pred, mybins=np.arange(0, 256))
      precisions[i, :] = pre
      recalls[i, :] = rec
    precisions = np.sum(precisions, 0) / (len(self._groundtruths) + 1e-8)
    recalls = np.sum(recalls, 0) / (len(self._groundtruths) + 1e-8)
    f = (1 + beta) * precisions * recalls / (beta * precisions + recalls + 1e-8)
    f_max = np.max(f)
    f_max = f_max.astype(np.float32)
    return f_max

  def _mask_normalize(self, mask):
    return mask / (np.amax(mask) + 1e-8)

  def _compute_pre_rec(self, true, pred, mybins=np.arange(0, 256)):
    # Pixel count of the groundtruth foreground region.
    gt_num = true[true > 128].size
    # Predicted pixel values inside the groundtruth foreground region.
    pp = pred[true > 128]
    # Predicted pixel values inside the groundtruth background region.
    nn = pred[true <= 128]
    pp_hist, pp_edges = np.histogram(pp, bins=mybins)
    nn_hist, nn_edges = np.histogram(nn, bins=mybins)
    pp_hist_flip = np.flipud(pp_hist)
    nn_hist_flip = np.flipud(nn_hist)
    pp_hist_flip_cum = np.cumsum(pp_hist_flip)
    nn_hist_flip_cum = np.cumsum(nn_hist_flip)
    precision = pp_hist_flip_cum / (
        pp_hist_flip_cum + nn_hist_flip_cum + 1e-8)  # TP / (TP + FP)
    recall = pp_hist_flip_cum / (gt_num + 1e-8)  # TP / (TP + FN)
    precision[np.isnan(precision)] = 0.0
    recall[np.isnan(recall)] = 0.0
    return (np.reshape(precision, len(precision)),
            np.reshape(recall, len(recall)))

  def _convert_to_numpy(self, groundtruths, predictions):
    """Converts tensors to numpy arrays."""
    numpy_groundtruths = groundtruths.numpy()
    numpy_predictions = predictions.numpy()
    return numpy_groundtruths, numpy_predictions

  def update_state(self, groundtruths, predictions):
    """Updates segmentation results and groundtruth data.

    Args:
      groundtruths: Tuple of a single Tensor [batch, width, height, 1],
        groundtruth masks in range [0, 1].
      predictions: Tuple of a single Tensor [batch, width, height, 1],
        predicted masks in range [0, 1].
    """
    groundtruths, predictions = self._convert_to_numpy(
        groundtruths[0], predictions[0])
    for true, pred in zip(groundtruths, predictions):
      self._groundtruths.append(true)
      self._predictions.append(pred)
```
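Two notes on this class. First, `beta = 0.3` plays the role of β² in the F-measure, the convention common in the salient-object-detection literature. Second, `_compute_pre_rec` sweeps all 255 integer thresholds at once: flipping each histogram and taking a cumulative sum yields, for every threshold from high to low, the number of pixels at or above it. A small self-contained sketch of that trick (the pixel values below are made up for illustration):

```python
import numpy as np

pred = np.array([10, 100, 200, 250], dtype=float)  # hypothetical pixel values
bins = np.arange(0, 256)                           # same bins as _compute_pre_rec

hist, _ = np.histogram(pred, bins=bins)
above = np.cumsum(np.flipud(hist))

# above[k] counts pixels in the top (k + 1) bins, i.e. values >= ~254 - k.
# Only the pixel at 250 clears the highest thresholds:
print(above[:6])  # -> [0 0 0 0 1 1]
```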
official/vision/beta/projects/basnet/evaluation/relax_f.py → official/vision/beta/projects/basnet/evaluation/metrics.py (renamed)
@@ -12,12 +12,204 @@
```python
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The MAE and maxFscore implementations are a modified version of
https://github.com/xuebinqin/Binary-Segmentation-Evaluation-Tool
"""

# Import libraries
import numpy as np
from scipy import signal


class MAE(object):
  """Mean Absolute Error (MAE) metric for BASNet."""
  # Body identical to the MAE class from the deleted mae.py shown above.


class maxFscore(object):
  """Maximum F-score metric for BASNet."""
  # Body identical to the maxFscore class from the deleted max_f.py shown above.


class relaxedFscore(object):
  """Relaxed F-score metric for BASNet."""
  # Remainder of the class is collapsed in the diff view (carried over from
  # relax_f.py; presumably the consumer of the scipy.signal import above).
```
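With the rename, all three evaluation classes live in a single `metrics` module. A sketch of the consolidated call pattern (not from the commit; it mirrors the tests below, and `relaxedFscore` is omitted because its interface is collapsed in this view):

```python
# Illustrative only: the consolidated metrics module after this commit.
import tensorflow as tf
from official.vision.beta.projects.basnet.evaluation import metrics

labels = (tf.random.uniform([2, 64, 64, 1]),)  # groundtruth masks in [0, 1]
preds = (tf.random.uniform([2, 64, 64, 1]),)   # predicted masks in [0, 1]

for metric in (metrics.MAE(), metrics.maxFscore()):
  metric.reset_states()
  metric.update_state(labels, preds)
  print(metric.name, metric.result())
```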
official/vision/beta/projects/basnet/evaluation/evaluation_test.py → official/vision/beta/projects/basnet/evaluation/metrics_test.py (renamed)
```diff
@@ -21,8 +21,7 @@ from absl.testing import parameterized
 import numpy as np
 import tensorflow as tf
 
-from official.vision.beta.projects.basnet.evaluation import mae
-from official.vision.beta.projects.basnet.evaluation import max_f
+from official.vision.beta.projects.basnet.evaluation import metrics
 
 
 class BASNetMetricTest(parameterized.TestCase, tf.test.TestCase):
@@ -33,7 +32,7 @@ class BASNetMetricTest(parameterized.TestCase, tf.test.TestCase):
     inputs = (tf.random.uniform([2, input_size, input_size, 1]),)
     labels = (tf.random.uniform([2, input_size, input_size, 1]),)
 
-    mae_obj = mae.MAE()
+    mae_obj = metrics.MAE()
     mae_obj.reset_states()
     mae_obj.update_state(labels, inputs)
     output = mae_obj.result()
@@ -52,7 +51,7 @@ class BASNetMetricTest(parameterized.TestCase, tf.test.TestCase):
     inputs = (tf.random.uniform([2, input_size, input_size, 1]),)
     labels = (tf.random.uniform([2, input_size, input_size, 1]),)
 
-    max_f_obj = max_f.maxFscore()
+    max_f_obj = metrics.maxFscore()
     max_f_obj.reset_states()
     max_f_obj.update_state(labels, inputs)
     output = max_f_obj.result()
```
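The test changes are purely mechanical: the deleted `mae` and `max_f` imports are replaced by the consolidated `metrics` module, and the same `MAE` and `maxFscore` classes are exercised from their new location.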
official/vision/beta/projects/basnet/tasks/basnet.py
```diff
@@ -25,9 +25,7 @@ from official.core import input_reader
 from official.core import task_factory
 from official.vision.beta.projects.basnet.configs import basnet as exp_cfg
 from official.vision.beta.dataloaders import segmentation_input
-from official.vision.beta.projects.basnet.evaluation import max_f
-from official.vision.beta.projects.basnet.evaluation import relax_f
-from official.vision.beta.projects.basnet.evaluation import mae
+from official.vision.beta.projects.basnet.evaluation import metrics
 from official.vision.beta.projects.basnet.losses import basnet_losses
 from official.vision.beta.projects.basnet.modeling import basnet_model
@@ -155,16 +153,16 @@ class BASNetTask(base_task.Task):
   def build_metrics(self, training=False):
     """Gets streaming metrics for training/validation."""
-    metrics = []
+    evaluations = []
     if training:
-      metrics = []
+      evaluations = []
     else:
-      self.mae_metric = mae.MAE()
-      self.maxf_metric = max_f.maxFscore()
-      self.relaxf_metric = relax_f.relaxedFscore()
+      self.mae_metric = metrics.MAE()
+      self.maxf_metric = metrics.maxFscore()
+      self.relaxf_metric = metrics.relaxedFscore()
 
-    return metrics
+    return evaluations
 
   def train_step(self, inputs, model, optimizer, metrics=None):
     """Does forward and backward.
```
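One detail worth noting (an inference; the commit message does not say): the local variable `metrics` in `build_metrics` is renamed to `evaluations`, plausibly because it would otherwise shadow the newly imported `metrics` module and break the `metrics.MAE()` calls inside the function.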
official/vision/beta/projects/basnet/train.py
```diff
@@ -30,12 +30,6 @@ from official.core import train_lib
 from official.core import train_utils
 from official.modeling import performance
 
-#import os
-#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
-#os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1, 2, 3"
 
 FLAGS = flags.FLAGS
```