Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
6d806a0a
Commit
6d806a0a
authored
Dec 10, 2020
by
Zhenyu Tan
Committed by
A. Unique TensorFlower
Dec 10, 2020
Browse files
Provide PerClass IoU
PiperOrigin-RevId: 346895957
parent
87d7e974
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
245 additions
and
0 deletions
+245
-0
official/vision/keras_cv/__init__.py
official/vision/keras_cv/__init__.py
+1
-0
official/vision/keras_cv/metrics/__init__.py
official/vision/keras_cv/metrics/__init__.py
+16
-0
official/vision/keras_cv/metrics/iou.py
official/vision/keras_cv/metrics/iou.py
+129
-0
official/vision/keras_cv/metrics/iou_test.py
official/vision/keras_cv/metrics/iou_test.py
+99
-0
No files found.
official/vision/keras_cv/__init__.py
View file @
6d806a0a
...
...
@@ -16,4 +16,5 @@
# pylint: disable=wildcard-import
from
official.vision.keras_cv
import
layers
from
official.vision.keras_cv
import
losses
from
official.vision.keras_cv
import
metrics
from
official.vision.keras_cv
import
ops
official/vision/keras_cv/metrics/__init__.py
0 → 100644
View file @
6d806a0a
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-CV metrics package definition."""
from
official.vision.keras_cv.metrics.iou
import
PerClassIoU
official/vision/keras_cv/metrics/iou.py
0 → 100644
View file @
6d806a0a
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IOU Metrics used for semantic segmentation models."""
import
numpy
as
np
import
tensorflow
as
tf
class PerClassIoU(tf.keras.metrics.Metric):
  """Computes the per-class Intersection-Over-Union metric.

  Per-class Intersection-Over-Union is a common evaluation metric for semantic
  image segmentation; it computes the IOU for each semantic class:

  IOU = true_positive / (true_positive + false_positive + false_negative).

  The predictions are accumulated in a confusion matrix, weighted by
  `sample_weight`, and the metric is then calculated from it.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Example:

  >>> # cm = [[1, 1],
  >>> #       [1, 1]]
  >>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
  >>> # iou = true_positives / (sum_row + sum_col - true_positives)
  >>> # result = [1 / (2 + 2 - 1), 1 / (2 + 2 - 1)] = [0.33, 0.33]
  >>> m = PerClassIoU(num_classes=2)
  >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
  >>> m.result().numpy()
  [0.33333334, 0.33333334]
  """

  def __init__(self, num_classes, name=None, dtype=None):
    """Initializes `PerClassIoU`.

    Args:
      num_classes: The possible number of labels the prediction task can have.
        This value must be provided, since a confusion matrix of dimension =
        [num_classes, num_classes] will be allocated.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(PerClassIoU, self).__init__(name=name, dtype=dtype)
    self.num_classes = num_classes

    # Variable to accumulate the predictions in the confusion matrix.
    # TF2-native initializer; the v1 compat alias is unnecessary here.
    self.total_cm = self.add_weight(
        'total_confusion_matrix',
        shape=(num_classes, num_classes),
        initializer=tf.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates the confusion matrix statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`, and
        must be broadcastable to `y_true`.

    Returns:
      Update op that adds the batch confusion matrix to `total_cm`.
    """
    y_true = tf.cast(y_true, self._dtype)
    y_pred = tf.cast(y_pred, self._dtype)

    # Flatten the input if its rank > 1.
    if y_pred.shape.ndims > 1:
      y_pred = tf.reshape(y_pred, [-1])

    if y_true.shape.ndims > 1:
      y_true = tf.reshape(y_true, [-1])

    if sample_weight is not None:
      sample_weight = tf.cast(sample_weight, self._dtype)
      if sample_weight.shape.ndims > 1:
        sample_weight = tf.reshape(sample_weight, [-1])

    # Accumulate the prediction to current confusion matrix.
    current_cm = tf.math.confusion_matrix(
        y_true,
        y_pred,
        self.num_classes,
        weights=sample_weight,
        dtype=self._dtype)
    return self.total_cm.assign_add(current_cm)

  def result(self):
    """Computes the per-class intersection-over-union via the confusion matrix."""
    sum_over_row = tf.cast(
        tf.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
    sum_over_col = tf.cast(
        tf.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
    true_positives = tf.cast(
        tf.linalg.tensor_diag_part(self.total_cm), dtype=self._dtype)

    # sum_over_row + sum_over_col =
    #     2 * true_positives + false_positives + false_negatives.
    denominator = sum_over_row + sum_over_col - true_positives

    # divide_no_nan yields 0 (not NaN) for classes absent from both labels
    # and predictions.
    return tf.math.divide_no_nan(true_positives, denominator)

  def reset_states(self):
    """Resets the accumulated confusion matrix to all zeros."""
    tf.keras.backend.set_value(
        self.total_cm, np.zeros((self.num_classes, self.num_classes)))

  def get_config(self):
    """Returns the serializable config of the metric."""
    config = {'num_classes': self.num_classes}
    base_config = super(PerClassIoU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
official/vision/keras_cv/metrics/iou_test.py
0 → 100644
View file @
6d806a0a
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
import
tensorflow
as
tf
from
official.vision.keras_cv.metrics
import
iou
class MeanIoUTest(tf.test.TestCase):
  """Unit tests for the PerClassIoU metric."""

  def test_config(self):
    metric = iou.PerClassIoU(num_classes=2, name='per_class_iou')
    self.assertEqual(metric.name, 'per_class_iou')
    self.assertEqual(metric.num_classes, 2)

    # Round-trip through get_config/from_config preserves the settings.
    restored = iou.PerClassIoU.from_config(metric.get_config())
    self.assertEqual(restored.name, 'per_class_iou')
    self.assertEqual(restored.num_classes, 2)

  def test_unweighted(self):
    predictions = [0, 1, 0, 1]
    labels = [0, 0, 1, 1]

    metric = iou.PerClassIoU(num_classes=2)
    result = metric(labels, predictions)

    # Confusion matrix: [[1, 1],
    #                    [1, 1]]
    # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
    # iou = true_positives / (sum_row + sum_col - true_positives)
    expected = [1 / (2 + 2 - 1), 1 / (2 + 2 - 1)]
    self.assertAllClose(expected, result, atol=1e-3)

  def test_weighted(self):
    predictions = tf.constant([0, 1, 0, 1], dtype=tf.float32)
    labels = tf.constant([0, 0, 1, 1])
    weights = tf.constant([0.2, 0.3, 0.4, 0.1])

    metric = iou.PerClassIoU(num_classes=2)
    result = metric(labels, predictions, sample_weight=weights)

    # Confusion matrix: [[0.2, 0.3],
    #                    [0.4, 0.1]]
    # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5],
    # true_positives = [0.2, 0.1]
    # iou = true_positives / (sum_row + sum_col - true_positives)
    expected = [0.2 / (0.6 + 0.5 - 0.2), 0.1 / (0.4 + 0.5 - 0.1)]
    self.assertAllClose(expected, result, atol=1e-3)

  def test_multi_dim_input(self):
    predictions = tf.constant([[0, 1], [0, 1]], dtype=tf.float32)
    labels = tf.constant([[0, 0], [1, 1]])
    weights = tf.constant([[0.2, 0.3], [0.4, 0.1]])

    metric = iou.PerClassIoU(num_classes=2)
    result = metric(labels, predictions, sample_weight=weights)

    # Rank-2 inputs are flattened; same statistics as test_weighted:
    # cm = [[0.2, 0.3], [0.4, 0.1]]
    # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5],
    # true_positives = [0.2, 0.1]
    expected = [0.2 / (0.6 + 0.5 - 0.2), 0.1 / (0.4 + 0.5 - 0.1)]
    self.assertAllClose(expected, result, atol=1e-3)

  def test_zero_valid_entries(self):
    # With no accumulated statistics every class's IOU is 0, not NaN.
    metric = iou.PerClassIoU(num_classes=2)
    self.assertAllClose(metric.result(), [0, 0], atol=1e-3)

  def test_zero_and_non_zero_entries(self):
    predictions = tf.constant([1], dtype=tf.float32)
    labels = tf.constant([1])

    metric = iou.PerClassIoU(num_classes=2)
    result = metric(labels, predictions)

    # Confusion matrix: [[0, 0],
    #                    [0, 1]]
    # sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
    # Class 0 never appears, so its IOU is 0.
    expected = [0, 1 / (1 + 1 - 1)]
    self.assertAllClose(expected, result, atol=1e-3)
# Run the test suite when invoked as a script.
if __name__ == '__main__':
  tf.test.main()
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment