Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
dcuai
dlexamples
Commits
0016b0a7
Commit
0016b0a7
authored
Jan 11, 2023
by
sunxx1
Browse files
Merge branch 'dtk22.04' into 'main'
Dtk22.04 See merge request dcutoolkit/deeplearing/dlexamples_new!49
parents
17bc28d5
7a382d5d
Changes
335
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
2416 additions
and
0 deletions
+2416
-0
Keras/keras-cv/benchmarks/metrics/coco/recall_performance.py
Keras/keras-cv/benchmarks/metrics/coco/recall_performance.py
+102
-0
Keras/keras-cv/benchmarks/vectorization_strategy_benchmark.py
...s/keras-cv/benchmarks/vectorization_strategy_benchmark.py
+1240
-0
Keras/keras-cv/build_deps/build_pip_pkg.sh
Keras/keras-cv/build_deps/build_pip_pkg.sh
+88
-0
Keras/keras-cv/build_deps/configure.py
Keras/keras-cv/build_deps/configure.py
+174
-0
Keras/keras-cv/build_deps/tf_dependency/BUILD
Keras/keras-cv/build_deps/tf_dependency/BUILD
+0
-0
Keras/keras-cv/build_deps/tf_dependency/BUILD.tpl
Keras/keras-cv/build_deps/tf_dependency/BUILD.tpl
+18
-0
Keras/keras-cv/build_deps/tf_dependency/build_defs.bzl.tpl
Keras/keras-cv/build_deps/tf_dependency/build_defs.bzl.tpl
+4
-0
Keras/keras-cv/build_deps/tf_dependency/tf_configure.bzl
Keras/keras-cv/build_deps/tf_dependency/tf_configure.bzl
+244
-0
Keras/keras-cv/cloudbuild/Dockerfile
Keras/keras-cv/cloudbuild/Dockerfile
+4
-0
Keras/keras-cv/cloudbuild/README.md
Keras/keras-cv/cloudbuild/README.md
+50
-0
Keras/keras-cv/cloudbuild/cloudbuild.yaml
Keras/keras-cv/cloudbuild/cloudbuild.yaml
+79
-0
Keras/keras-cv/cloudbuild/requirements.txt
Keras/keras-cv/cloudbuild/requirements.txt
+10
-0
Keras/keras-cv/cloudbuild/unit_test_jobs.jsonnet
Keras/keras-cv/cloudbuild/unit_test_jobs.jsonnet
+41
-0
Keras/keras-cv/examples/layers/object_detection/anchor_generator_configuration.py
...layers/object_detection/anchor_generator_configuration.py
+52
-0
Keras/keras-cv/examples/layers/object_detection/demo_utils.py
...s/keras-cv/examples/layers/object_detection/demo_utils.py
+81
-0
Keras/keras-cv/examples/layers/preprocessing/bounding_box/demo_utils.py
.../examples/layers/preprocessing/bounding_box/demo_utils.py
+83
-0
Keras/keras-cv/examples/layers/preprocessing/bounding_box/mosaic_demo.py
...examples/layers/preprocessing/bounding_box/mosaic_demo.py
+35
-0
Keras/keras-cv/examples/layers/preprocessing/bounding_box/random_flip_demo.py
...les/layers/preprocessing/bounding_box/random_flip_demo.py
+35
-0
Keras/keras-cv/examples/layers/preprocessing/bounding_box/random_rotation_demo.py
...layers/preprocessing/bounding_box/random_rotation_demo.py
+37
-0
Keras/keras-cv/examples/layers/preprocessing/bounding_box/random_shear_demo.py
...es/layers/preprocessing/bounding_box/random_shear_demo.py
+39
-0
No files found.
Keras/keras-cv/benchmarks/metrics/coco/recall_performance.py
0 → 100644
View file @
0016b0a7
import
math
import
random
import
time
import
matplotlib.pyplot
as
plt
import
numpy
as
np
import
pandas
as
pd
import
seaborn
as
sns
import
tensorflow
as
tf
import
keras_cv
from
keras_cv.metrics
import
coco
def produce_random_data(include_confidence=False, num_images=128, classes=20):
    """Build a batch of synthetic bounding boxes for benchmarking.

    Each image gets a random number of boxes (0-25) with random coordinates
    and class labels; every image is then padded to exactly 25 boxes.

    Args:
        include_confidence: if True, append a random confidence column to
            each box (6 values per box instead of 5).
        num_images: number of images in the produced batch. Must be >= 1.
        classes: number of distinct class ids to sample labels from.

    Returns:
        A tensor of shape [num_images, 25, 5 or 6] of corner-format boxes.
    """
    samples = []
    for _ in range(num_images):
        # Random box count in [0, 25]; RNG call order matters for
        # reproducibility, so draws happen in the same sequence as before.
        box_count = math.floor(25 * random.uniform(0, 1))
        labels = np.floor(np.random.rand(box_count, 1) * classes)
        coords = np.random.rand(box_count, 4)
        annotated = np.concatenate([coords, labels], axis=-1)
        if include_confidence:
            scores = np.random.rand(box_count, 1)
            annotated = np.concatenate([annotated, scores], axis=-1)
        as_tensor = tf.constant(annotated, dtype=tf.float32)
        samples.append(keras_cv.utils.bounding_box.xywh_to_corners(as_tensor))
    # Pad every image to 25 boxes so the batch stacks into one tensor.
    row_width = samples[0].shape[1]
    padded = [
        keras_cv.bounding_box.pad_batch_to_shape(s, [25, row_width])
        for s in samples
    ]
    return tf.stack(padded, axis=0)
# Benchmark COCORecall: time update_state() and result() for growing batch
# sizes, then plot each timing curve.
# NOTE(fix): the original computed y_true/y_pred once here before the loop,
# but both were unconditionally recomputed inside the loop before first use —
# dead (and expensive) work, so it is removed.
class_ids = list(range(20))

n_images = [128, 256, 512, 512 + 256, 1024]

update_state_runtimes = []
result_runtimes = []
end_to_end_runtimes = []

for images in n_images:
    y_true = produce_random_data(num_images=images)
    y_pred = produce_random_data(num_images=images, include_confidence=True)
    metric = coco.COCORecall(class_ids)
    # warm up: first call pays tracing/compilation cost we don't want to time
    metric.update_state(y_true, y_pred)
    metric.result()

    start = time.time()
    metric.update_state(y_true, y_pred)
    update_state_done = time.time()
    metric.result()
    end = time.time()

    update_state_runtimes.append(update_state_done - start)
    result_runtimes.append(end - update_state_done)
    end_to_end_runtimes.append(end - start)

    print("end_to_end_runtimes", end_to_end_runtimes)

data = pd.DataFrame(
    {
        "n_images": n_images,
        "update_state_runtimes": update_state_runtimes,
        "result_runtimes": result_runtimes,
        "end_to_end_runtimes": end_to_end_runtimes,
    }
)

# One plot per timing series.
sns.lineplot(data=data, x="n_images", y="update_state_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("update_state() runtime (seconds)")
plt.title("Runtime of update_state()")
plt.show()

sns.lineplot(data=data, x="n_images", y="result_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("result() runtime (seconds)")
plt.title("Runtime of result()")
plt.show()

sns.lineplot(data=data, x="n_images", y="end_to_end_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("End to end runtime (seconds)")
plt.title("Runtimes of update_state() followed by result()")
plt.show()
Keras/keras-cv/benchmarks/vectorization_strategy_benchmark.py
0 → 100644
View file @
0016b0a7
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Setup/utils
"""
import
time
import
matplotlib.pyplot
as
plt
import
tensorflow
as
tf
import
tensorflow.keras
as
keras
import
tensorflow.keras.layers
as
layers
from
tensorflow.keras
import
backend
from
keras_cv.utils
import
bounding_box
from
keras_cv.utils
import
fill_utils
def single_rectangle_mask(corners, mask_shape):
    """Compute a boolean mask for one rectangle.

    Args:
        corners: tensor of one rectangle's coordinates in corners format
            (x0, y0, x1, y1) — indexed as corners[0..3] below, so this is a
            single rectangle, not a batch (despite the older docstring).
        mask_shape: a (width, height) tuple giving the output mask size.

    Returns:
        A boolean mask of shape (height, width) — presumably row-major given
        the range_row/range_col broadcasting below; True inside the rectangle.
    """
    # add broadcasting axes so each coordinate can broadcast against the grid
    corners = corners[..., tf.newaxis, tf.newaxis]
    # split coordinates
    x0 = corners[0]
    y0 = corners[1]
    x1 = corners[2]
    y1 = corners[3]
    # repeat height and width
    width, height = mask_shape
    x0_rep = tf.repeat(x0, height, axis=0)
    y0_rep = tf.repeat(y0, width, axis=1)
    x1_rep = tf.repeat(x1, height, axis=0)
    y1_rep = tf.repeat(y1, width, axis=1)
    # range grid: row indices as a column vector, column indices as a row vector
    range_row = tf.range(0, height, dtype=corners.dtype)
    range_col = tf.range(0, width, dtype=corners.dtype)
    range_row = range_row[:, tf.newaxis]
    range_col = range_col[tf.newaxis, :]
    # boolean masks: inside iff x0 <= col < x1 and y0 <= row < y1
    mask_x0 = tf.less_equal(x0_rep, range_col)
    mask_y0 = tf.less_equal(y0_rep, range_row)
    mask_x1 = tf.less(range_col, x1_rep)
    mask_y1 = tf.less(range_row, y1_rep)
    masks = mask_x0 & mask_y0 & mask_x1 & mask_y1
    return masks
def fill_single_rectangle(
    image, centers_x, centers_y, widths, heights, fill_values
):
    """Fill one rectangle of a single image with the given fill values.

    Args:
        image: the image tensor to fill a rectangle into.
        centers_x: x-coordinate of the rectangle center.
        centers_y: y-coordinate of the rectangle center.
        widths: rectangle width.
        heights: rectangle height.
        fill_values: tensor shaped like `image` providing the fill content.

    Returns:
        A copy of `image` with the rectangle region replaced by
        `fill_values`.
    """
    shape = tf.shape(image)
    img_height = shape[0]
    img_width = shape[1]
    # Assemble center/size into xywh and convert to corner coordinates.
    rect_xywh = tf.stack([centers_x, centers_y, widths, heights], axis=0)
    rect_corners = bounding_box.convert_to_corners(
        tf.cast(rect_xywh, tf.float32), format="coco"
    )
    # Boolean membership mask, broadcast over the channel axis.
    inside = single_rectangle_mask(rect_corners, (img_width, img_height))
    inside = tf.expand_dims(inside, -1)
    return tf.where(inside, fill_values, image)
"""
# Layer Implementations
## Fully Vectorized
"""
class VectorizedRandomCutout(layers.Layer):
    """Random cutout augmentation implemented fully batch-vectorized.

    Erases one randomly sized/positioned rectangle per image, filling it
    with a constant value or gaussian noise, operating on the whole batch
    at once through `fill_utils.fill_rectangle`.

    Args:
        height_factor: number or (lower, upper) tuple/list for rectangle
            height. Float values must lie in [0, 1] and are fractions of
            the image height; a scalar means bounds of (0, value).
        width_factor: same as `height_factor`, for rectangle width.
        fill_mode: "constant" or "gaussian_noise".
        fill_value: fill used when `fill_mode="constant"`.
        seed: optional random seed passed to the TF random ops.
    """

    def __init__(
        self,
        height_factor,
        width_factor,
        fill_mode="constant",
        fill_value=0.0,
        seed=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Bug fix: keep the raw factors — get_config() serializes them but
        # the original never stored them, so get_config() raised
        # AttributeError.
        self.height_factor = height_factor
        self.width_factor = width_factor
        self.height_lower, self.height_upper = self._parse_bounds(height_factor)
        self.width_lower, self.width_upper = self._parse_bounds(width_factor)
        if fill_mode not in ["gaussian_noise", "constant"]:
            raise ValueError(
                '`fill_mode` should be "gaussian_noise" '
                f'or "constant". Got `fill_mode`={fill_mode}'
            )
        if not isinstance(self.height_lower, type(self.height_upper)):
            raise ValueError(
                "`height_factor` must have lower bound and upper bound "
                "with same type, got {} and {}".format(
                    type(self.height_lower), type(self.height_upper)
                )
            )
        if not isinstance(self.width_lower, type(self.width_upper)):
            raise ValueError(
                "`width_factor` must have lower bound and upper bound "
                "with same type, got {} and {}".format(
                    type(self.width_lower), type(self.width_upper)
                )
            )
        if self.height_upper < self.height_lower:
            raise ValueError(
                "`height_factor` cannot have upper bound less than "
                "lower bound, got {}".format(height_factor)
            )
        self._height_is_float = isinstance(self.height_lower, float)
        if self._height_is_float:
            if not self.height_lower >= 0.0 or not self.height_upper <= 1.0:
                raise ValueError(
                    "`height_factor` must have values between [0, 1] "
                    "when is float, got {}".format(height_factor)
                )
        if self.width_upper < self.width_lower:
            raise ValueError(
                "`width_factor` cannot have upper bound less than "
                "lower bound, got {}".format(width_factor)
            )
        self._width_is_float = isinstance(self.width_lower, float)
        if self._width_is_float:
            if not self.width_lower >= 0.0 or not self.width_upper <= 1.0:
                raise ValueError(
                    "`width_factor` must have values between [0, 1] "
                    "when is float, got {}".format(width_factor)
                )
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.seed = seed

    def _parse_bounds(self, factor):
        # A scalar factor means bounds of (0, factor), preserving its type.
        if isinstance(factor, (tuple, list)):
            return factor[0], factor[1]
        else:
            return type(factor)(0), factor

    @tf.function(jit_compile=True)
    def call(self, inputs, training=True):
        if training is None:
            training = backend.learning_phase()
        augment = lambda: self._random_cutout(inputs)
        no_augment = lambda: inputs
        # Only augment in training mode; tf.cond keeps this graph-friendly.
        return tf.cond(tf.cast(training, tf.bool), augment, no_augment)

    def _random_cutout(self, inputs):
        """Apply one random cutout rectangle to every image in the batch."""
        center_x, center_y = self._compute_rectangle_position(inputs)
        rectangle_height, rectangle_width = self._compute_rectangle_size(inputs)
        rectangle_fill = self._compute_rectangle_fill(inputs)
        inputs = fill_utils.fill_rectangle(
            inputs,
            center_x,
            center_y,
            rectangle_width,
            rectangle_height,
            rectangle_fill,
        )
        return inputs

    def _compute_rectangle_position(self, inputs):
        # Uniform integer centers, one per image in the batch.
        input_shape = tf.shape(inputs)
        batch_size, image_height, image_width = (
            input_shape[0],
            input_shape[1],
            input_shape[2],
        )
        center_x = tf.random.uniform(
            shape=[batch_size],
            minval=0,
            maxval=image_width,
            dtype=tf.int32,
            seed=self.seed,
        )
        center_y = tf.random.uniform(
            shape=[batch_size],
            minval=0,
            maxval=image_height,
            dtype=tf.int32,
            seed=self.seed,
        )
        return center_x, center_y

    def _compute_rectangle_size(self, inputs):
        # Sample per-image sizes; float factors scale with the image size.
        input_shape = tf.shape(inputs)
        batch_size, image_height, image_width = (
            input_shape[0],
            input_shape[1],
            input_shape[2],
        )
        height = tf.random.uniform(
            [batch_size],
            minval=self.height_lower,
            maxval=self.height_upper,
            dtype=tf.float32,
        )
        width = tf.random.uniform(
            [batch_size],
            minval=self.width_lower,
            maxval=self.width_upper,
            dtype=tf.float32,
        )
        if self._height_is_float:
            height = height * tf.cast(image_height, tf.float32)
        if self._width_is_float:
            width = width * tf.cast(image_width, tf.float32)
        height = tf.cast(tf.math.ceil(height), tf.int32)
        width = tf.cast(tf.math.ceil(width), tf.int32)
        # Clamp so the rectangle never exceeds the image.
        height = tf.minimum(height, image_height)
        width = tf.minimum(width, image_width)
        return height, width

    def _compute_rectangle_fill(self, inputs):
        input_shape = tf.shape(inputs)
        if self.fill_mode == "constant":
            fill_value = tf.fill(input_shape, self.fill_value)
        else:
            # gaussian noise
            fill_value = tf.random.normal(input_shape)
        return fill_value

    def get_config(self):
        config = {
            "height_factor": self.height_factor,
            "width_factor": self.width_factor,
            "fill_mode": self.fill_mode,
            "fill_value": self.fill_value,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
"""
## tf.map_fn
"""
class MapFnRandomCutout(layers.Layer):
    """Random cutout augmentation applied per-image via `tf.map_fn`.

    Erases one randomly sized/positioned rectangle per image, filling it
    with a constant value or gaussian noise. Each image is processed
    independently by mapping `_random_cutout` over the batch.

    Args:
        height_factor: number or (lower, upper) tuple/list for rectangle
            height. Float values must lie in [0, 1] and are fractions of
            the image height; a scalar means bounds of (0, value).
        width_factor: same as `height_factor`, for rectangle width.
        fill_mode: "constant" or "gaussian_noise".
        fill_value: fill used when `fill_mode="constant"`.
        seed: optional random seed passed to the TF random ops.
    """

    def __init__(
        self,
        height_factor,
        width_factor,
        fill_mode="constant",
        fill_value=0.0,
        seed=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Bug fix: keep the raw factors — get_config() serializes them but
        # the original never stored them, so get_config() raised
        # AttributeError.
        self.height_factor = height_factor
        self.width_factor = width_factor
        self.height_lower, self.height_upper = self._parse_bounds(height_factor)
        self.width_lower, self.width_upper = self._parse_bounds(width_factor)
        if fill_mode not in ["gaussian_noise", "constant"]:
            raise ValueError(
                '`fill_mode` should be "gaussian_noise" '
                f'or "constant". Got `fill_mode`={fill_mode}'
            )
        if not isinstance(self.height_lower, type(self.height_upper)):
            raise ValueError(
                "`height_factor` must have lower bound and upper bound "
                "with same type, got {} and {}".format(
                    type(self.height_lower), type(self.height_upper)
                )
            )
        if not isinstance(self.width_lower, type(self.width_upper)):
            raise ValueError(
                "`width_factor` must have lower bound and upper bound "
                "with same type, got {} and {}".format(
                    type(self.width_lower), type(self.width_upper)
                )
            )
        if self.height_upper < self.height_lower:
            raise ValueError(
                "`height_factor` cannot have upper bound less than "
                "lower bound, got {}".format(height_factor)
            )
        self._height_is_float = isinstance(self.height_lower, float)
        if self._height_is_float:
            if not self.height_lower >= 0.0 or not self.height_upper <= 1.0:
                raise ValueError(
                    "`height_factor` must have values between [0, 1] "
                    "when is float, got {}".format(height_factor)
                )
        if self.width_upper < self.width_lower:
            raise ValueError(
                "`width_factor` cannot have upper bound less than "
                "lower bound, got {}".format(width_factor)
            )
        self._width_is_float = isinstance(self.width_lower, float)
        if self._width_is_float:
            if not self.width_lower >= 0.0 or not self.width_upper <= 1.0:
                raise ValueError(
                    "`width_factor` must have values between [0, 1] "
                    "when is float, got {}".format(width_factor)
                )
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.seed = seed

    def _parse_bounds(self, factor):
        # A scalar factor means bounds of (0, factor), preserving its type.
        if isinstance(factor, (tuple, list)):
            return factor[0], factor[1]
        else:
            return type(factor)(0), factor

    @tf.function(jit_compile=True)
    def call(self, inputs, training=True):
        # Robustness fix (matches VectorizedRandomCutout): resolve a None
        # training flag — tf.cast(None, tf.bool) would fail below.
        if training is None:
            training = backend.learning_phase()
        augment = lambda: tf.map_fn(self._random_cutout, inputs)
        no_augment = lambda: inputs
        return tf.cond(tf.cast(training, tf.bool), augment, no_augment)

    def _random_cutout(self, image):
        """Apply one random cutout rectangle to a single image."""
        # (param renamed from `input`, which shadowed the builtin)
        center_x, center_y = self._compute_rectangle_position(image)
        rectangle_height, rectangle_width = self._compute_rectangle_size(image)
        rectangle_fill = self._compute_rectangle_fill(image)
        image = fill_single_rectangle(
            image,
            center_x,
            center_y,
            rectangle_width,
            rectangle_height,
            rectangle_fill,
        )
        return image

    def _compute_rectangle_position(self, inputs):
        # Uniform scalar center within one (height, width, ...) image.
        input_shape = tf.shape(inputs)
        image_height, image_width = (
            input_shape[0],
            input_shape[1],
        )
        center_x = tf.random.uniform(
            shape=[],
            minval=0,
            maxval=image_width,
            dtype=tf.int32,
            seed=self.seed,
        )
        center_y = tf.random.uniform(
            shape=[],
            minval=0,
            maxval=image_height,
            dtype=tf.int32,
            seed=self.seed,
        )
        return center_x, center_y

    def _compute_rectangle_size(self, inputs):
        # Sample scalar sizes; float factors scale with the image size.
        input_shape = tf.shape(inputs)
        image_height, image_width = (
            input_shape[0],
            input_shape[1],
        )
        height = tf.random.uniform(
            [],
            minval=self.height_lower,
            maxval=self.height_upper,
            dtype=tf.float32,
        )
        width = tf.random.uniform(
            [],
            minval=self.width_lower,
            maxval=self.width_upper,
            dtype=tf.float32,
        )
        if self._height_is_float:
            height = height * tf.cast(image_height, tf.float32)
        if self._width_is_float:
            width = width * tf.cast(image_width, tf.float32)
        height = tf.cast(tf.math.ceil(height), tf.int32)
        width = tf.cast(tf.math.ceil(width), tf.int32)
        # Clamp so the rectangle never exceeds the image.
        height = tf.minimum(height, image_height)
        width = tf.minimum(width, image_width)
        return height, width

    def _compute_rectangle_fill(self, inputs):
        input_shape = tf.shape(inputs)
        if self.fill_mode == "constant":
            fill_value = tf.fill(input_shape, self.fill_value)
        else:
            # gaussian noise
            fill_value = tf.random.normal(input_shape)
        return fill_value

    def get_config(self):
        config = {
            "height_factor": self.height_factor,
            "width_factor": self.width_factor,
            "fill_mode": self.fill_mode,
            "fill_value": self.fill_value,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
"""
## tf.vectorized_map
"""
class VMapRandomCutout(layers.Layer):
    """Random cutout augmentation applied per-image via `tf.vectorized_map`.

    Erases one randomly sized/positioned rectangle per image, filling it
    with a constant value or gaussian noise. `_random_cutout` is written
    per-image and auto-vectorized over the batch.

    Args:
        height_factor: number or (lower, upper) tuple/list for rectangle
            height. Float values must lie in [0, 1] and are fractions of
            the image height; a scalar means bounds of (0, value).
        width_factor: same as `height_factor`, for rectangle width.
        fill_mode: "constant" or "gaussian_noise".
        fill_value: fill used when `fill_mode="constant"`.
        seed: optional random seed passed to the TF random ops.
    """

    def __init__(
        self,
        height_factor,
        width_factor,
        fill_mode="constant",
        fill_value=0.0,
        seed=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Bug fix: keep the raw factors — get_config() serializes them but
        # the original never stored them, so get_config() raised
        # AttributeError.
        self.height_factor = height_factor
        self.width_factor = width_factor
        self.height_lower, self.height_upper = self._parse_bounds(height_factor)
        self.width_lower, self.width_upper = self._parse_bounds(width_factor)
        if fill_mode not in ["gaussian_noise", "constant"]:
            raise ValueError(
                '`fill_mode` should be "gaussian_noise" '
                f'or "constant". Got `fill_mode`={fill_mode}'
            )
        if not isinstance(self.height_lower, type(self.height_upper)):
            raise ValueError(
                "`height_factor` must have lower bound and upper bound "
                "with same type, got {} and {}".format(
                    type(self.height_lower), type(self.height_upper)
                )
            )
        if not isinstance(self.width_lower, type(self.width_upper)):
            raise ValueError(
                "`width_factor` must have lower bound and upper bound "
                "with same type, got {} and {}".format(
                    type(self.width_lower), type(self.width_upper)
                )
            )
        if self.height_upper < self.height_lower:
            raise ValueError(
                "`height_factor` cannot have upper bound less than "
                "lower bound, got {}".format(height_factor)
            )
        self._height_is_float = isinstance(self.height_lower, float)
        if self._height_is_float:
            if not self.height_lower >= 0.0 or not self.height_upper <= 1.0:
                raise ValueError(
                    "`height_factor` must have values between [0, 1] "
                    "when is float, got {}".format(height_factor)
                )
        if self.width_upper < self.width_lower:
            raise ValueError(
                "`width_factor` cannot have upper bound less than "
                "lower bound, got {}".format(width_factor)
            )
        self._width_is_float = isinstance(self.width_lower, float)
        if self._width_is_float:
            if not self.width_lower >= 0.0 or not self.width_upper <= 1.0:
                raise ValueError(
                    "`width_factor` must have values between [0, 1] "
                    "when is float, got {}".format(width_factor)
                )
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.seed = seed

    def _parse_bounds(self, factor):
        # A scalar factor means bounds of (0, factor), preserving its type.
        if isinstance(factor, (tuple, list)):
            return factor[0], factor[1]
        else:
            return type(factor)(0), factor

    @tf.function(jit_compile=True)
    def call(self, inputs, training=True):
        # Robustness fix (matches VectorizedRandomCutout): resolve a None
        # training flag — tf.cast(None, tf.bool) would fail below.
        if training is None:
            training = backend.learning_phase()
        augment = lambda: tf.vectorized_map(self._random_cutout, inputs)
        no_augment = lambda: inputs
        return tf.cond(tf.cast(training, tf.bool), augment, no_augment)

    def _random_cutout(self, image):
        """Apply one random cutout rectangle to a single image."""
        # (param renamed from `input`, which shadowed the builtin)
        center_x, center_y = self._compute_rectangle_position(image)
        rectangle_height, rectangle_width = self._compute_rectangle_size(image)
        rectangle_fill = self._compute_rectangle_fill(image)
        image = fill_single_rectangle(
            image,
            center_x,
            center_y,
            rectangle_width,
            rectangle_height,
            rectangle_fill,
        )
        return image

    def _compute_rectangle_position(self, inputs):
        # Uniform scalar center within one (height, width, ...) image.
        input_shape = tf.shape(inputs)
        image_height, image_width = (
            input_shape[0],
            input_shape[1],
        )
        center_x = tf.random.uniform(
            shape=[],
            minval=0,
            maxval=image_width,
            dtype=tf.int32,
            seed=self.seed,
        )
        center_y = tf.random.uniform(
            shape=[],
            minval=0,
            maxval=image_height,
            dtype=tf.int32,
            seed=self.seed,
        )
        return center_x, center_y

    def _compute_rectangle_size(self, inputs):
        # Sample scalar sizes; float factors scale with the image size.
        input_shape = tf.shape(inputs)
        image_height, image_width = (
            input_shape[0],
            input_shape[1],
        )
        height = tf.random.uniform(
            [],
            minval=self.height_lower,
            maxval=self.height_upper,
            dtype=tf.float32,
        )
        width = tf.random.uniform(
            [],
            minval=self.width_lower,
            maxval=self.width_upper,
            dtype=tf.float32,
        )
        if self._height_is_float:
            height = height * tf.cast(image_height, tf.float32)
        if self._width_is_float:
            width = width * tf.cast(image_width, tf.float32)
        height = tf.cast(tf.math.ceil(height), tf.int32)
        width = tf.cast(tf.math.ceil(width), tf.int32)
        # Clamp so the rectangle never exceeds the image.
        height = tf.minimum(height, image_height)
        width = tf.minimum(width, image_width)
        return height, width

    def _compute_rectangle_fill(self, inputs):
        input_shape = tf.shape(inputs)
        if self.fill_mode == "constant":
            fill_value = tf.fill(input_shape, self.fill_value)
        else:
            # gaussian noise
            fill_value = tf.random.normal(input_shape)
        return fill_value

    def get_config(self):
        config = {
            "height_factor": self.height_factor,
            "width_factor": self.width_factor,
            "fill_mode": self.fill_mode,
            "fill_value": self.fill_value,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
"""
JIT COMPILED
# Layer Implementations
## Fully Vectorized
"""
class JITVectorizedRandomCutout(layers.Layer):
    """JIT-compiled variant of the fully-vectorized random cutout layer.

    Identical in behavior to `VectorizedRandomCutout`: erases one randomly
    sized/positioned rectangle per image across the whole batch at once
    through `fill_utils.fill_rectangle`.

    Args:
        height_factor: number or (lower, upper) tuple/list for rectangle
            height. Float values must lie in [0, 1] and are fractions of
            the image height; a scalar means bounds of (0, value).
        width_factor: same as `height_factor`, for rectangle width.
        fill_mode: "constant" or "gaussian_noise".
        fill_value: fill used when `fill_mode="constant"`.
        seed: optional random seed passed to the TF random ops.
    """

    def __init__(
        self,
        height_factor,
        width_factor,
        fill_mode="constant",
        fill_value=0.0,
        seed=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Bug fix: keep the raw factors — get_config() serializes them but
        # the original never stored them, so get_config() raised
        # AttributeError.
        self.height_factor = height_factor
        self.width_factor = width_factor
        self.height_lower, self.height_upper = self._parse_bounds(height_factor)
        self.width_lower, self.width_upper = self._parse_bounds(width_factor)
        if fill_mode not in ["gaussian_noise", "constant"]:
            raise ValueError(
                '`fill_mode` should be "gaussian_noise" '
                f'or "constant". Got `fill_mode`={fill_mode}'
            )
        if not isinstance(self.height_lower, type(self.height_upper)):
            raise ValueError(
                "`height_factor` must have lower bound and upper bound "
                "with same type, got {} and {}".format(
                    type(self.height_lower), type(self.height_upper)
                )
            )
        if not isinstance(self.width_lower, type(self.width_upper)):
            raise ValueError(
                "`width_factor` must have lower bound and upper bound "
                "with same type, got {} and {}".format(
                    type(self.width_lower), type(self.width_upper)
                )
            )
        if self.height_upper < self.height_lower:
            raise ValueError(
                "`height_factor` cannot have upper bound less than "
                "lower bound, got {}".format(height_factor)
            )
        self._height_is_float = isinstance(self.height_lower, float)
        if self._height_is_float:
            if not self.height_lower >= 0.0 or not self.height_upper <= 1.0:
                raise ValueError(
                    "`height_factor` must have values between [0, 1] "
                    "when is float, got {}".format(height_factor)
                )
        if self.width_upper < self.width_lower:
            raise ValueError(
                "`width_factor` cannot have upper bound less than "
                "lower bound, got {}".format(width_factor)
            )
        self._width_is_float = isinstance(self.width_lower, float)
        if self._width_is_float:
            if not self.width_lower >= 0.0 or not self.width_upper <= 1.0:
                raise ValueError(
                    "`width_factor` must have values between [0, 1] "
                    "when is float, got {}".format(width_factor)
                )
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.seed = seed

    def _parse_bounds(self, factor):
        # A scalar factor means bounds of (0, factor), preserving its type.
        if isinstance(factor, (tuple, list)):
            return factor[0], factor[1]
        else:
            return type(factor)(0), factor

    @tf.function(jit_compile=True)
    def call(self, inputs, training=True):
        if training is None:
            training = backend.learning_phase()
        augment = lambda: self._random_cutout(inputs)
        no_augment = lambda: inputs
        # Only augment in training mode; tf.cond keeps this graph-friendly.
        return tf.cond(tf.cast(training, tf.bool), augment, no_augment)

    def _random_cutout(self, inputs):
        """Apply one random cutout rectangle to every image in the batch."""
        center_x, center_y = self._compute_rectangle_position(inputs)
        rectangle_height, rectangle_width = self._compute_rectangle_size(inputs)
        rectangle_fill = self._compute_rectangle_fill(inputs)
        inputs = fill_utils.fill_rectangle(
            inputs,
            center_x,
            center_y,
            rectangle_width,
            rectangle_height,
            rectangle_fill,
        )
        return inputs

    def _compute_rectangle_position(self, inputs):
        # Uniform integer centers, one per image in the batch.
        input_shape = tf.shape(inputs)
        batch_size, image_height, image_width = (
            input_shape[0],
            input_shape[1],
            input_shape[2],
        )
        center_x = tf.random.uniform(
            shape=[batch_size],
            minval=0,
            maxval=image_width,
            dtype=tf.int32,
            seed=self.seed,
        )
        center_y = tf.random.uniform(
            shape=[batch_size],
            minval=0,
            maxval=image_height,
            dtype=tf.int32,
            seed=self.seed,
        )
        return center_x, center_y

    def _compute_rectangle_size(self, inputs):
        # Sample per-image sizes; float factors scale with the image size.
        input_shape = tf.shape(inputs)
        batch_size, image_height, image_width = (
            input_shape[0],
            input_shape[1],
            input_shape[2],
        )
        height = tf.random.uniform(
            [batch_size],
            minval=self.height_lower,
            maxval=self.height_upper,
            dtype=tf.float32,
        )
        width = tf.random.uniform(
            [batch_size],
            minval=self.width_lower,
            maxval=self.width_upper,
            dtype=tf.float32,
        )
        if self._height_is_float:
            height = height * tf.cast(image_height, tf.float32)
        if self._width_is_float:
            width = width * tf.cast(image_width, tf.float32)
        height = tf.cast(tf.math.ceil(height), tf.int32)
        width = tf.cast(tf.math.ceil(width), tf.int32)
        # Clamp so the rectangle never exceeds the image.
        height = tf.minimum(height, image_height)
        width = tf.minimum(width, image_width)
        return height, width

    def _compute_rectangle_fill(self, inputs):
        input_shape = tf.shape(inputs)
        if self.fill_mode == "constant":
            fill_value = tf.fill(input_shape, self.fill_value)
        else:
            # gaussian noise
            fill_value = tf.random.normal(input_shape)
        return fill_value

    def get_config(self):
        config = {
            "height_factor": self.height_factor,
            "width_factor": self.width_factor,
            "fill_mode": self.fill_mode,
            "fill_value": self.fill_value,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
"""
## tf.map_fn
"""
class
JITMapFnRandomCutout
(
layers
.
Layer
):
def
__init__
(
self
,
height_factor
,
width_factor
,
fill_mode
=
"constant"
,
fill_value
=
0.0
,
seed
=
None
,
**
kwargs
,
):
super
().
__init__
(
**
kwargs
)
self
.
height_lower
,
self
.
height_upper
=
self
.
_parse_bounds
(
height_factor
)
self
.
width_lower
,
self
.
width_upper
=
self
.
_parse_bounds
(
width_factor
)
if
fill_mode
not
in
[
"gaussian_noise"
,
"constant"
]:
raise
ValueError
(
'`fill_mode` should be "gaussian_noise" '
f
'or "constant". Got `fill_mode`=
{
fill_mode
}
'
)
if
not
isinstance
(
self
.
height_lower
,
type
(
self
.
height_upper
)):
raise
ValueError
(
"`height_factor` must have lower bound and upper bound "
"with same type, got {} and {}"
.
format
(
type
(
self
.
height_lower
),
type
(
self
.
height_upper
)
)
)
if
not
isinstance
(
self
.
width_lower
,
type
(
self
.
width_upper
)):
raise
ValueError
(
"`width_factor` must have lower bound and upper bound "
"with same type, got {} and {}"
.
format
(
type
(
self
.
width_lower
),
type
(
self
.
width_upper
)
)
)
if
self
.
height_upper
<
self
.
height_lower
:
raise
ValueError
(
"`height_factor` cannot have upper bound less than "
"lower bound, got {}"
.
format
(
height_factor
)
)
self
.
_height_is_float
=
isinstance
(
self
.
height_lower
,
float
)
if
self
.
_height_is_float
:
if
not
self
.
height_lower
>=
0.0
or
not
self
.
height_upper
<=
1.0
:
raise
ValueError
(
"`height_factor` must have values between [0, 1] "
"when is float, got {}"
.
format
(
height_factor
)
)
if
self
.
width_upper
<
self
.
width_lower
:
raise
ValueError
(
"`width_factor` cannot have upper bound less than "
"lower bound, got {}"
.
format
(
width_factor
)
)
self
.
_width_is_float
=
isinstance
(
self
.
width_lower
,
float
)
if
self
.
_width_is_float
:
if
not
self
.
width_lower
>=
0.0
or
not
self
.
width_upper
<=
1.0
:
raise
ValueError
(
"`width_factor` must have values between [0, 1] "
"when is float, got {}"
.
format
(
width_factor
)
)
self
.
fill_mode
=
fill_mode
self
.
fill_value
=
fill_value
self
.
seed
=
seed
def
_parse_bounds
(
self
,
factor
):
if
isinstance
(
factor
,
(
tuple
,
list
)):
return
factor
[
0
],
factor
[
1
]
else
:
return
type
(
factor
)(
0
),
factor
@
tf
.
function
(
jit_compile
=
True
)
def
call
(
self
,
inputs
,
training
=
True
):
augment
=
lambda
:
tf
.
map_fn
(
self
.
_random_cutout
,
inputs
)
no_augment
=
lambda
:
inputs
return
tf
.
cond
(
tf
.
cast
(
training
,
tf
.
bool
),
augment
,
no_augment
)
def
_random_cutout
(
self
,
input
):
center_x
,
center_y
=
self
.
_compute_rectangle_position
(
input
)
rectangle_height
,
rectangle_width
=
self
.
_compute_rectangle_size
(
input
)
rectangle_fill
=
self
.
_compute_rectangle_fill
(
input
)
input
=
fill_single_rectangle
(
input
,
center_x
,
center_y
,
rectangle_width
,
rectangle_height
,
rectangle_fill
,
)
return
input
    def _compute_rectangle_position(self, inputs):
        # Draw a uniformly random integer (x, y) center for the cutout
        # rectangle, bounded by this single image's (height, width).
        input_shape = tf.shape(inputs)
        image_height, image_width = (
            input_shape[0],
            input_shape[1],
        )
        center_x = tf.random.uniform(
            shape=[],
            minval=0,
            maxval=image_width,
            dtype=tf.int32,
            seed=self.seed,
        )
        center_y = tf.random.uniform(
            shape=[],
            minval=0,
            maxval=image_height,
            dtype=tf.int32,
            seed=self.seed,
        )
        return center_x, center_y
    def _compute_rectangle_size(self, inputs):
        # Draw a random (height, width) for the cutout rectangle. Float
        # factors are fractions of the image size; int factors are taken
        # as absolute pixel sizes.
        input_shape = tf.shape(inputs)
        image_height, image_width = (
            input_shape[0],
            input_shape[1],
        )
        height = tf.random.uniform(
            [],
            minval=self.height_lower,
            maxval=self.height_upper,
            dtype=tf.float32,
        )
        width = tf.random.uniform(
            [],
            minval=self.width_lower,
            maxval=self.width_upper,
            dtype=tf.float32,
        )
        # Scale fractional sizes by the actual image dimensions.
        if self._height_is_float:
            height = height * tf.cast(image_height, tf.float32)
        if self._width_is_float:
            width = width * tf.cast(image_width, tf.float32)
        height = tf.cast(tf.math.ceil(height), tf.int32)
        width = tf.cast(tf.math.ceil(width), tf.int32)
        # Clamp so the rectangle never exceeds the image itself.
        height = tf.minimum(height, image_height)
        width = tf.minimum(width, image_width)
        return height, width
def
_compute_rectangle_fill
(
self
,
inputs
):
input_shape
=
tf
.
shape
(
inputs
)
if
self
.
fill_mode
==
"constant"
:
fill_value
=
tf
.
fill
(
input_shape
,
self
.
fill_value
)
else
:
# gaussian noise
fill_value
=
tf
.
random
.
normal
(
input_shape
)
return
fill_value
    def get_config(self):
        # Serialize constructor arguments for Keras layer (de)serialization.
        # NOTE(review): this reads self.height_factor / self.width_factor,
        # but the visible portion of __init__ only stores the parsed
        # height_lower/height_upper (and width_*) bounds — confirm those raw
        # factor attributes are actually assigned somewhere, otherwise this
        # raises AttributeError.
        config = {
            "height_factor": self.height_factor,
            "width_factor": self.width_factor,
            "fill_mode": self.fill_mode,
            "fill_value": self.fill_value,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
"""
## tf.vectorized_map
"""
class JITVMapRandomCutout(layers.Layer):
    """Random-cutout layer using `tf.vectorized_map`, XLA-jitted.

    Benchmark variant of random cutout: for each image in the batch, one
    rectangle at a random position and of random size is overwritten with
    either a constant value or gaussian noise.

    Args:
        height_factor: scalar or (lower, upper) pair controlling rectangle
            height. Float values are fractions of the image height (must lie
            in [0, 1]); ints are absolute pixel sizes. A scalar is
            interpreted as (0, scalar).
        width_factor: same as `height_factor`, for the rectangle width.
        fill_mode: "constant" or "gaussian_noise".
        fill_value: fill used when `fill_mode="constant"`.
        seed: optional seed for the rectangle-position draws.
    """

    def __init__(
        self,
        height_factor,
        width_factor,
        fill_mode="constant",
        fill_value=0.0,
        seed=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Keep the raw factors: get_config() serializes them. (Previously
        # only the parsed bounds were stored, so get_config() raised
        # AttributeError.)
        self.height_factor = height_factor
        self.width_factor = width_factor
        self.height_lower, self.height_upper = self._parse_bounds(height_factor)
        self.width_lower, self.width_upper = self._parse_bounds(width_factor)

        if fill_mode not in ["gaussian_noise", "constant"]:
            raise ValueError(
                '`fill_mode` should be "gaussian_noise" '
                f'or "constant". Got `fill_mode`={fill_mode}'
            )

        if not isinstance(self.height_lower, type(self.height_upper)):
            raise ValueError(
                "`height_factor` must have lower bound and upper bound "
                "with same type, got {} and {}".format(
                    type(self.height_lower), type(self.height_upper)
                )
            )
        if not isinstance(self.width_lower, type(self.width_upper)):
            raise ValueError(
                "`width_factor` must have lower bound and upper bound "
                "with same type, got {} and {}".format(
                    type(self.width_lower), type(self.width_upper)
                )
            )

        if self.height_upper < self.height_lower:
            raise ValueError(
                "`height_factor` cannot have upper bound less than "
                "lower bound, got {}".format(height_factor)
            )
        self._height_is_float = isinstance(self.height_lower, float)
        if self._height_is_float:
            if not self.height_lower >= 0.0 or not self.height_upper <= 1.0:
                raise ValueError(
                    "`height_factor` must have values between [0, 1] "
                    "when is float, got {}".format(height_factor)
                )

        if self.width_upper < self.width_lower:
            raise ValueError(
                "`width_factor` cannot have upper bound less than "
                "lower bound, got {}".format(width_factor)
            )
        self._width_is_float = isinstance(self.width_lower, float)
        if self._width_is_float:
            if not self.width_lower >= 0.0 or not self.width_upper <= 1.0:
                raise ValueError(
                    "`width_factor` must have values between [0, 1] "
                    "when is float, got {}".format(width_factor)
                )

        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.seed = seed

    def _parse_bounds(self, factor):
        """Split `factor` into (lower, upper); a scalar becomes (0, factor)."""
        if isinstance(factor, (tuple, list)):
            return factor[0], factor[1]
        else:
            return type(factor)(0), factor

    @tf.function(jit_compile=True)
    def call(self, inputs, training=True):
        """Apply one random cutout per image when training; identity otherwise."""
        augment = lambda: tf.vectorized_map(self._random_cutout, inputs)
        no_augment = lambda: inputs
        return tf.cond(tf.cast(training, tf.bool), augment, no_augment)

    def _random_cutout(self, image):
        """Fill one random rectangle of a single image."""
        center_x, center_y = self._compute_rectangle_position(image)
        rectangle_height, rectangle_width = self._compute_rectangle_size(image)
        rectangle_fill = self._compute_rectangle_fill(image)
        return fill_single_rectangle(
            image,
            center_x,
            center_y,
            rectangle_width,
            rectangle_height,
            rectangle_fill,
        )

    def _compute_rectangle_position(self, inputs):
        """Draw a random integer (x, y) center within the image bounds."""
        input_shape = tf.shape(inputs)
        image_height, image_width = (
            input_shape[0],
            input_shape[1],
        )
        center_x = tf.random.uniform(
            shape=[],
            minval=0,
            maxval=image_width,
            dtype=tf.int32,
            seed=self.seed,
        )
        center_y = tf.random.uniform(
            shape=[],
            minval=0,
            maxval=image_height,
            dtype=tf.int32,
            seed=self.seed,
        )
        return center_x, center_y

    def _compute_rectangle_size(self, inputs):
        """Draw a random (height, width); float factors are scaled by the
        image size, and both sides are clamped to the image dimensions."""
        input_shape = tf.shape(inputs)
        image_height, image_width = (
            input_shape[0],
            input_shape[1],
        )
        height = tf.random.uniform(
            [],
            minval=self.height_lower,
            maxval=self.height_upper,
            dtype=tf.float32,
        )
        width = tf.random.uniform(
            [],
            minval=self.width_lower,
            maxval=self.width_upper,
            dtype=tf.float32,
        )
        # Float factors are fractions of the image size.
        if self._height_is_float:
            height = height * tf.cast(image_height, tf.float32)
        if self._width_is_float:
            width = width * tf.cast(image_width, tf.float32)
        height = tf.cast(tf.math.ceil(height), tf.int32)
        width = tf.cast(tf.math.ceil(width), tf.int32)
        # Never exceed the image itself.
        height = tf.minimum(height, image_height)
        width = tf.minimum(width, image_width)
        return height, width

    def _compute_rectangle_fill(self, inputs):
        """Image-shaped tensor used to overwrite the cutout region."""
        input_shape = tf.shape(inputs)
        if self.fill_mode == "constant":
            fill_value = tf.fill(input_shape, self.fill_value)
        else:
            # gaussian noise
            fill_value = tf.random.normal(input_shape)
        return fill_value

    def get_config(self):
        """Serialize constructor arguments for layer (de)serialization."""
        config = {
            "height_factor": self.height_factor,
            "width_factor": self.width_factor,
            "fill_mode": self.fill_mode,
            "fill_value": self.fill_value,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
"""
# Benchmarking
"""
# Load CIFAR-10 once; only the training images are used for benchmarking.
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(float)
x_train.shape  # no-op outside a notebook; kept for parity with the notebook

images = []
num_images = [1000, 2000, 5000, 10000, 25000, 37500, 50000]
results = {}
for aug in [
    VectorizedRandomCutout,
    VMapRandomCutout,
    MapFnRandomCutout,
    JITVectorizedRandomCutout,
    JITVMapRandomCutout,
    JITMapFnRandomCutout,
]:
    c = aug.__name__
    layer = aug(0.2, 0.2)
    runtimes = []
    print(f"Timing {c}")
    for n_images in num_images:
        # warmup
        layer(x_train[:n_images])
        t0 = time.time()
        # Result is unused; the call is only timed.
        r1 = layer(x_train[:n_images])
        t1 = time.time()
        runtimes.append(t1 - t0)
        print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
    results[c] = runtimes

# One runtime-vs-batch-size curve per vectorization strategy.
plt.figure()
for key in results:
    plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.show()
"""
# Sanity check
all of these should have comparable outputs
"""
# Run the three non-JIT strategies on the same 3 images; outputs should look
# comparable (cutouts differ only by random draw).
images = []
for aug in [VectorizedRandomCutout, VMapRandomCutout, MapFnRandomCutout]:
    layer = aug(0.5, 0.5)
    images.append(layer(x_train[:3]))

# Flatten the 3 batches of 3 images into one list of 9 images.
images = [y for x in images for y in x]

plt.figure(figsize=(8, 8))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(images[i].numpy().astype("uint8"))
    plt.axis("off")
plt.show()
"""
# Extra notes
## Warnings
It would be very annoying for a user to use an official keras_cv component and be
warned that "RandomUniform" or "RandomUniformInt" inside pfor may not produce the
same output.
"""
Keras/keras-cv/build_deps/build_pip_pkg.sh
0 → 100644
View file @
0016b0a7
#!/usr/bin/env bash
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Builds a wheel of KerasCV for Pip. Requires Bazel.
# Adapted from https://github.com/tensorflow/addons/blob/master/build_deps/build_pip_pkg.sh
set -e
set -x

# Lower-cased kernel name, e.g. "linux", "darwin", "mingw64_nt-...".
PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"

# True when running under a Windows POSIX environment (cygwin/mingw/msys).
function is_windows() {
  if [[ "${PLATFORM}" =~ (cygwin|mingw32|mingw64|msys)_nt* ]]; then
    true
  else
    false
  fi
}

# Bazel places runfiles under a platform-dependent prefix.
if is_windows; then
  PIP_FILE_PREFIX="bazel-bin/build_pip_pkg.exe.runfiles/__main__/"
else
  PIP_FILE_PREFIX="bazel-bin/build_pip_pkg.runfiles/__main__/"
fi

function main() {
  # Parse args: the literal "make" selects a Makefile build (no bazel
  # runfiles prefix); any other arg is the destination directory.
  while [[ ! -z "${1}" ]]; do
    if [[ ${1} == "make" ]]; then
      echo "Using Makefile to build pip package."
      PIP_FILE_PREFIX=""
    else
      DEST=${1}
    fi
    shift
  done

  if [[ -z ${DEST} ]]; then
    echo "No destination dir provided"
    exit 1
  fi

  # Create the directory, then do dirname on a non-existent file inside it to
  # give us an absolute paths with tilde characters resolved to the destination
  # directory.
  mkdir -p ${DEST}
  if [[ ${PLATFORM} == "darwin" ]]; then
    # macOS readlink lacks -f; build the absolute path manually.
    DEST=$(pwd -P)/${DEST}
  else
    DEST=$(readlink -f "${DEST}")
  fi
  echo "=== destination directory: ${DEST}"

  # Stage the package contents in a throwaway temp dir.
  TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX)
  echo $(date) : "=== Using tmpdir: ${TMPDIR}"
  echo "=== Copy KerasCV Custom op files"

  cp ${PIP_FILE_PREFIX}setup.py "${TMPDIR}"
  cp ${PIP_FILE_PREFIX}MANIFEST.in "${TMPDIR}"
  cp ${PIP_FILE_PREFIX}README.md "${TMPDIR}"
  cp ${PIP_FILE_PREFIX}LICENSE "${TMPDIR}"
  # Copy sources (following symlinks) while excluding test files.
  rsync -avm -L --exclude='*_test.py' ${PIP_FILE_PREFIX}keras_cv "${TMPDIR}"

  pushd ${TMPDIR}
  echo $(date) : "=== Building wheel"
  python3 setup.py bdist_wheel > /dev/null

  cp dist/*.whl "${DEST}"
  popd
  rm -rf ${TMPDIR}
  echo $(date) : "=== Output wheel file is in: ${DEST}"
}

main "$@"
Keras/keras-cv/build_deps/configure.py
0 → 100644
View file @
0016b0a7
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Usage: python configure.py
"""Configures local environment to prepare for building KerasCV from source."""
import
logging
import
os
import
pathlib
import
platform
import
tensorflow
as
tf
from
packaging.version
import
Version
_TFA_BAZELRC = ".bazelrc"


def write(line):
    """Append a single line to the generated bazelrc file."""
    with open(_TFA_BAZELRC, "a") as bazelrc:
        bazelrc.write(line + "\n")


def write_action_env(var_name, var):
    """Append a `--action_env` entry exposing var_name="var" to Bazel actions."""
    entry = 'build --action_env {}="{}"'.format(var_name, var)
    write(entry)
def _system_is(name):
    """True when `platform.system()` reports the given OS name."""
    return platform.system() == name


def is_macos():
    return _system_is("Darwin")


def is_windows():
    return _system_is("Windows")


def is_linux():
    return _system_is("Linux")
def is_raspi_arm():
    """True on 32- or 64-bit ARM hosts (Raspberry Pi class machines)."""
    machine = os.uname()[4]
    return machine in ("armv7l", "aarch64")
def _linux_machine_is(arch):
    """True on a Linux host whose `platform.machine()` equals `arch`."""
    return is_linux() and platform.machine() == arch


def is_linux_ppc64le():
    return _linux_machine_is("ppc64le")


def is_linux_x86_64():
    return _linux_machine_is("x86_64")


def is_linux_arm():
    return _linux_machine_is("arm")


def is_linux_aarch64():
    return _linux_machine_is("aarch64")


def is_linux_s390x():
    return _linux_machine_is("s390x")
def get_tf_header_dir():
    """Directory containing the TensorFlow C++ headers.

    Backslashes are normalized to forward slashes on Windows so the path
    can be written into bazel configuration.
    """
    import tensorflow as tf

    # The first compile flag is "-I<dir>"; strip the "-I" prefix.
    header_dir = tf.sysconfig.get_compile_flags()[0][2:]
    if is_windows():
        header_dir = header_dir.replace("\\", "/")
    return header_dir
def get_cpp_version():
    """C++ standard to build against: c++17 for TF >= 2.10, else c++14."""
    if Version(tf.__version__) >= Version("2.10"):
        return "c++17"
    return "c++14"
def get_tf_shared_lib_dir():
    # Directory holding the TensorFlow shared library, per platform.
    import tensorflow as tf

    # OS Specific parsing
    if is_windows():
        # Compile flag is "-I<...>/tensorflow/include"; strip the "-I"
        # prefix and the trailing "include", then append "python".
        tf_shared_lib_dir = tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
        return tf_shared_lib_dir.replace("\\", "/")
    elif is_raspi_arm():
        # Same derivation as Windows, without the slash normalization.
        return tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
    else:
        # Link flag is "-L<dir>"; strip the "-L" prefix.
        return tf.sysconfig.get_link_flags()[0][2:]
# Converts the linkflag namespec to the full shared library name
def get_shared_lib_name():
    """Full filename of the TensorFlow shared library for this platform."""
    import tensorflow as tf

    namespec = tf.sysconfig.get_link_flags()
    if is_macos():
        # MacOS: "-l<name>" -> "lib<name>.dylib"
        return "lib" + namespec[1][2:] + ".dylib"
    elif is_windows():
        # Windows
        return "_pywrap_tensorflow_internal.lib"
    elif is_raspi_arm():
        # The below command for linux would return an empty list
        return "_pywrap_tensorflow_internal.so"
    else:
        # Linux: "-l:<name>" -> "<name>"
        return namespec[1][3:]
def create_build_configuration():
    """Write a fresh .bazelrc describing the local TensorFlow installation."""
    print()
    print("Configuring KerasCV to be built from source...")

    # Start from a clean file; write()/write_action_env() append.
    if os.path.isfile(_TFA_BAZELRC):
        os.remove(_TFA_BAZELRC)

    logging.disable(logging.WARNING)

    write_action_env("TF_HEADER_DIR", get_tf_header_dir())
    write_action_env("TF_SHARED_LIBRARY_DIR", get_tf_shared_lib_dir())
    write_action_env("TF_SHARED_LIBRARY_NAME", get_shared_lib_name())
    write_action_env("TF_CXX11_ABI_FLAG", tf.sysconfig.CXX11_ABI_FLAG)

    # This should be replaced with a call to tf.sysconfig if it's added
    write_action_env("TF_CPLUSPLUS_VER", get_cpp_version())

    write("build --spawn_strategy=standalone")
    write("build --strategy=Genrule=standalone")
    write("build --experimental_repo_remote_exec")
    write("build -c opt")
    # Match TensorFlow's C++ ABI so the custom ops link against it.
    write(
        "build --cxxopt="
        + '"-D_GLIBCXX_USE_CXX11_ABI="'
        + str(tf.sysconfig.CXX11_ABI_FLAG)
    )

    if is_windows():
        write("build --config=windows")
        write("build:windows --enable_runfiles")
        write("build:windows --copt=/experimental:preprocessor")
        write("build:windows --host_copt=/experimental:preprocessor")
        write("build:windows --copt=/arch=AVX")
        write("build:windows --cxxopt=/std:" + get_cpp_version())
        write("build:windows --host_cxxopt=/std:" + get_cpp_version())

    if is_macos() or is_linux():
        # AVX is unavailable on ppc64le / 32-bit ARM / aarch64.
        if not is_linux_ppc64le() and not is_linux_arm() and not is_linux_aarch64():
            write("build --copt=-mavx")
        write("build --cxxopt=-std=" + get_cpp_version())
        write("build --host_cxxopt=-std=" + get_cpp_version())

    print("> Building only CPU ops")

    print()
    print("Build configurations successfully written to", _TFA_BAZELRC, ":\n")
    print(pathlib.Path(_TFA_BAZELRC).read_text())
if __name__ == "__main__":
    # Entry point: write the .bazelrc used to build KerasCV custom ops.
    create_build_configuration()
Keras/keras-cv/build_deps/tf_dependency/BUILD
0 → 100644
View file @
0016b0a7
Keras/keras-cv/build_deps/tf_dependency/BUILD.tpl
0 → 100644
View file @
0016b0a7
package(default_visibility = ["//visibility:public"])
cc_library(
name = "tf_header_lib",
hdrs = [":tf_header_include"],
includes = ["include"],
visibility = ["//visibility:public"],
)
cc_library(
name = "libtensorflow_framework",
srcs = ["%
{
TF_SHARED_LIBRARY_NAME
}
"],
visibility = ["//visibility:public"],
)
%
{
TF_HEADER_GENRULE
}
%
{
TF_SHARED_LIBRARY_GENRULE
}
Keras/keras-cv/build_deps/tf_dependency/build_defs.bzl.tpl
0 → 100644
View file @
0016b0a7
# Addons Build Definitions inherited from TensorFlow Core
D_GLIBCXX_USE_CXX11_ABI = "%
{
tf_cx11_abi
}
"
CPLUSPLUS_VERSION = "%
{
tf_cplusplus_ver
}
"
Keras/keras-cv/build_deps/tf_dependency/tf_configure.bzl
0 → 100644
View file @
0016b0a7
"""Setup TensorFlow as external dependency"""
_TF_HEADER_DIR
=
"TF_HEADER_DIR"
_TF_SHARED_LIBRARY_DIR
=
"TF_SHARED_LIBRARY_DIR"
_TF_SHARED_LIBRARY_NAME
=
"TF_SHARED_LIBRARY_NAME"
_TF_CXX11_ABI_FLAG
=
"TF_CXX11_ABI_FLAG"
_TF_CPLUSPLUS_VER
=
"TF_CPLUSPLUS_VER"
def
_tpl
(
repository_ctx
,
tpl
,
substitutions
=
{},
out
=
None
):
if
not
out
:
out
=
tpl
repository_ctx
.
template
(
out
,
Label
(
"//build_deps/tf_dependency:%s.tpl"
%
tpl
),
substitutions
,
)
def _fail(msg):
    """Abort repository configuration with a red-highlighted error message."""
    ansi_red = "\033[0;31m"
    ansi_reset = "\033[0m"
    formatted = "%sPython Configuration Error:%s %s\n" % (ansi_red, ansi_reset, msg)
    fail(formatted)
def _is_windows(repository_ctx):
    """Returns True when the repository rule is evaluated on a Windows host."""
    host_os = repository_ctx.os.name.lower()
    return "windows" in host_os
def _execute(
        repository_ctx,
        cmdline,
        error_msg = None,
        error_details = None,
        empty_stdout_fine = False):
    """Executes an arbitrary shell command.

    Helper for executes an arbitrary shell command.

    Args:
      repository_ctx: the repository_ctx object.
      cmdline: list of strings, the command to execute.
      error_msg: string, a summary of the error if the command fails.
      error_details: string, details about the error or steps to fix it.
      empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise
        it's an error.

    Returns:
      The result of repository_ctx.execute(cmdline).
    """
    result = repository_ctx.execute(cmdline)

    # Any stderr output, or an unexpected empty stdout, is treated as failure.
    if result.stderr or not (empty_stdout_fine or result.stdout):
        _fail("\n".join([
            error_msg.strip() if error_msg else "Repository command failed",
            result.stderr.strip(),
            error_details if error_details else "",
        ]))
    return result
def _read_dir(repository_ctx, src_dir):
    """Returns a string with all files in a directory.

    Finds all files inside a directory, traversing subfolders and following
    symlinks. The returned string contains the full path of all files
    separated by line breaks.

    Args:
      repository_ctx: the repository_ctx object.
      src_dir: directory to find files from.

    Returns:
      A string of all files inside the given dir.
    """
    if _is_windows(repository_ctx):
        src_dir = src_dir.replace("/", "\\")
        find_result = _execute(
            repository_ctx,
            ["cmd.exe", "/c", "dir", src_dir, "/b", "/s", "/a-d"],
            empty_stdout_fine = True,
        )

        # src_files will be used in genrule.outs where the paths must
        # use forward slashes.
        result = find_result.stdout.replace("\\", "/")
    else:
        find_result = _execute(
            repository_ctx,
            ["find", src_dir, "-follow", "-type", "f"],
            empty_stdout_fine = True,
        )
        result = find_result.stdout
    return result
def _genrule(genrule_name, command, outs):
    """Returns a string with a genrule.

    Genrule executes the given command and produces the given outputs.

    Args:
      genrule_name: A unique name for genrule target.
      command: The command to run.
      outs: A list of files generated by this rule.

    Returns:
      A genrule target.
    """
    # Assembles BUILD-file text by concatenation; `outs` and `command` are
    # expected to be pre-formatted multi-line strings.
    return (
        "genrule(\n" +
        '    name = "' +
        genrule_name +
        '",\n' +
        "    outs = [\n" +
        outs +
        "\n    ],\n" +
        '    cmd = """\n' +
        command +
        '\n""",\n' +
        ")\n"
    )
def _norm_path(path):
    """Normalize `path`: backslashes become '/', one trailing '/' is dropped."""
    normalized = path.replace("\\", "/")
    if normalized[-1] == "/":
        return normalized[:-1]
    return normalized
def _symlink_genrule_for_dir(
        repository_ctx,
        src_dir,
        dest_dir,
        genrule_name,
        src_files = [],
        dest_files = [],
        tf_pip_dir_rename_pair = []):
    """Returns a genrule to symlink(or copy if on Windows) a set of files.

    If src_dir is passed, files will be read from the given directory; otherwise
    we assume files are in src_files and dest_files.

    Args:
      repository_ctx: the repository_ctx object.
      src_dir: source directory.
      dest_dir: directory to create symlink in.
      genrule_name: genrule name.
      src_files: list of source files instead of src_dir.
      dest_files: list of corresponding destination files.
      tf_pip_dir_rename_pair: list of the pair of tf pip parent directory to
        replace. For example, in TF pip package, the source code is under
        "tensorflow_core", and we might want to replace it with
        "tensorflow" to match the header includes.

    Returns:
      genrule target that creates the symlinks.
    """

    # Check that tf_pip_dir_rename_pair has the right length
    tf_pip_dir_rename_pair_len = len(tf_pip_dir_rename_pair)
    if tf_pip_dir_rename_pair_len != 0 and tf_pip_dir_rename_pair_len != 2:
        _fail("The size of argument tf_pip_dir_rename_pair should be either 0 or 2, but %d is given." % tf_pip_dir_rename_pair_len)

    if src_dir != None:
        src_dir = _norm_path(src_dir)
        dest_dir = _norm_path(dest_dir)
        files = "\n".join(sorted(_read_dir(repository_ctx, src_dir).splitlines()))

        # Create a list with the src_dir stripped to use for outputs.
        if tf_pip_dir_rename_pair_len:
            dest_files = files.replace(src_dir, "").replace(tf_pip_dir_rename_pair[0], tf_pip_dir_rename_pair[1]).splitlines()
        else:
            dest_files = files.replace(src_dir, "").splitlines()
        src_files = files.splitlines()
    command = []
    outs = []
    for i in range(len(dest_files)):
        if dest_files[i] != "":
            # If we have only one file to link we do not want to use the dest_dir, as
            # $(@D) will include the full path to the file.
            dest = "$(@D)/" + dest_dir + dest_files[i] if len(dest_files) != 1 else "$(@D)/" + dest_files[i]

            # Copy the headers to create a sandboxable setup.
            cmd = "cp -f"
            command.append(cmd + ' "%s" "%s"' % (src_files[i], dest))
            outs.append('        "' + dest_dir + dest_files[i] + '",')
    genrule = _genrule(
        genrule_name,
        ";\n".join(command),
        "\n".join(outs),
    )
    return genrule
def _tf_pip_impl(repository_ctx):
    # Implementation of the tf_configure repository rule: exposes the local
    # TF pip package's headers and shared library (locations are passed in
    # via the env vars written by build_deps/configure.py).
    tf_header_dir = repository_ctx.os.environ[_TF_HEADER_DIR]
    tf_header_rule = _symlink_genrule_for_dir(
        repository_ctx,
        tf_header_dir,
        "include",
        "tf_header_include",
        tf_pip_dir_rename_pair = ["tensorflow_core", "tensorflow"],
    )

    tf_shared_library_dir = repository_ctx.os.environ[_TF_SHARED_LIBRARY_DIR]
    tf_shared_library_name = repository_ctx.os.environ[_TF_SHARED_LIBRARY_NAME]
    tf_shared_library_path = "%s/%s" % (tf_shared_library_dir, tf_shared_library_name)
    tf_cx11_abi = "-D_GLIBCXX_USE_CXX11_ABI=%s" % (repository_ctx.os.environ[_TF_CXX11_ABI_FLAG])
    tf_cplusplus_ver = "-std=%s" % repository_ctx.os.environ[_TF_CPLUSPLUS_VER]

    tf_shared_library_rule = _symlink_genrule_for_dir(
        repository_ctx,
        None,
        "",
        tf_shared_library_name,
        [tf_shared_library_path],
        [tf_shared_library_name],
    )

    # Instantiate the BUILD and build_defs.bzl templates with the computed
    # genrules and compiler flags.
    _tpl(repository_ctx, "BUILD", {
        "%{TF_HEADER_GENRULE}": tf_header_rule,
        "%{TF_SHARED_LIBRARY_GENRULE}": tf_shared_library_rule,
        "%{TF_SHARED_LIBRARY_NAME}": tf_shared_library_name,
    })

    _tpl(
        repository_ctx,
        "build_defs.bzl",
        {
            "%{tf_cx11_abi}": tf_cx11_abi,
            "%{tf_cplusplus_ver}": tf_cplusplus_ver,
        },
    )
# Repository rule that materializes the local TensorFlow installation as an
# external Bazel repository; re-runs when any listed env var changes.
tf_configure = repository_rule(
    environ = [
        _TF_HEADER_DIR,
        _TF_SHARED_LIBRARY_DIR,
        _TF_SHARED_LIBRARY_NAME,
        _TF_CXX11_ABI_FLAG,
        _TF_CPLUSPLUS_VER,
    ],
    implementation = _tf_pip_impl,
)
Keras/keras-cv/cloudbuild/Dockerfile
0 → 100644
View file @
0016b0a7
# keras-cv-image:deps has all deps of KerasCV for testing.
FROM
us-west1-docker.pkg.dev/keras-team-test/keras-cv-test/keras-cv-image:deps
COPY
. /kerascv
WORKDIR
/kerascv
Keras/keras-cv/cloudbuild/README.md
0 → 100644
View file @
0016b0a7
# KerasCV Accelerators Testing
This
`cloudbuild/`
directory contains configurations for accelerators (GPU/TPU)
testing. Briefly, for each PR, it copies the PR's code to a base docker image
which contains KerasCV dependencies to make a new docker image, and deploys the
new image to a Google Kubernetes Engine cluster, then runs all tests in
`keras_cv/`
via Google Cloud Build.
-
`cloudbuild.yaml`
: The cloud build configuration that specifies steps to run
by cloud build.
-
`Dockerfile`
: The configuration to build the docker image for deployment.
-
`requirements.txt`
: Dependencies of KerasCV.
-
`unit_test_jobs.jsonnet`
: Jsonnet config that tells GKE cluster to run all
unit tests in
`keras_cv/`
.
This test is powered by
[
ml-testing-accelerators
](
https://github.com/GoogleCloudPlatform/ml-testing-accelerators
)
.
### Adding Test Dependencies
You must be authorized to run builds in the
`keras-team-test`
GCP project.
If you are not, please open a GitHub issue and ping a team member.
To authorize yourself with
`keras-team-test`
, run:
```
bash
gcloud config
set
project keras-team-test
```
To add a dependency for GPU tests:
-
Create a PR adding the dependency to
`requirements.txt`
-
Have a Keras team member update the Docker image for GPU tests by running the remaining steps
-
Create a
`Dockerfile`
with the following contents:
```
FROM tensorflow/tensorflow:2.10.0-gpu
RUN \
apt-get -y update && \
apt-get -y install openjdk-8-jdk && \
echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list && \
curl https://bazel.build/bazel-release.pub.gpg | apt-key add
RUN apt-get -y update
RUN apt-get -y install bazel
RUN apt-get -y install git
RUN git clone https://github.com/{path_to_keras_cv_fork}.git
RUN cd keras-cv && git checkout {branch_name}
RUN pip install -r keras-cv/cloudbuild/requirements.txt
```
-
Run the following command from the directory with your
`Dockerfile`
:
```
gcloud builds submit --region=us-west1 --tag us-west1-docker.pkg.dev/keras-team-test/keras-cv-test/keras-cv-image:deps --timeout=10m
```
-
Merge the PR adding the dependency
Keras/keras-cv/cloudbuild/cloudbuild.yaml
0 → 100644
View file @
0016b0a7
substitutions
:
# GCS bucket name.
_GCS_BUCKET
:
'
gs://keras-cv-github-test'
# GKE cluster name.
_CLUSTER_NAME
:
'
keras-cv-test-cluster'
# Location of GKE cluster.
_CLUSTER_ZONE
:
'
us-west1-b'
# Image name.
_IMAGE_NAME
:
'
us-west1-docker.pkg.dev/keras-team-test/keras-cv-test/keras-cv-image'
steps
:
-
name
:
'
docker'
id
:
build-image
args
:
[
'
build'
,
'
.'
,
'
-f'
,
'
cloudbuild/Dockerfile'
,
'
-t'
,
'
$_IMAGE_NAME:$BUILD_ID'
,
]
-
name
:
'
docker'
id
:
push-image
waitFor
:
-
build-image
args
:
[
'
push'
,
'
$_IMAGE_NAME:$BUILD_ID'
]
-
name
:
'
golang'
id
:
download-jsonnet
waitFor
:
[
'
-'
]
entrypoint
:
'
go'
args
:
[
'
install'
,
'
github.com/google/go-jsonnet/cmd/jsonnet@latest'
,
]
-
name
:
'
google/cloud-sdk'
id
:
clone-templates
waitFor
:
[
'
-'
]
entrypoint
:
'
git'
args
:
[
'
clone'
,
'
https://github.com/GoogleCloudPlatform/ml-testing-accelerators.git'
,
]
-
name
:
'
golang'
id
:
build-templates
waitFor
:
-
download-jsonnet
-
clone-templates
entrypoint
:
'
jsonnet'
args
:
[
'
cloudbuild/unit_test_jobs.jsonnet'
,
'
--string'
,
'
-J'
,
'
ml-testing-accelerators'
,
'
--ext-str'
,
'
image=$_IMAGE_NAME'
,
'
--ext-str'
,
'
tag_name=$BUILD_ID'
,
'
--ext-str'
,
'
gcs_bucket=$_GCS_BUCKET'
,
'
-o'
,
'
output.yaml'
,
]
-
name
:
'
google/cloud-sdk'
id
:
create-job
waitFor
:
-
push-image
-
build-templates
entrypoint
:
bash
args
:
-
-c
-
|
set -u
set -e
set -x
gcloud container clusters get-credentials $_CLUSTER_NAME --zone $_CLUSTER_ZONE --project keras-team-test
job_name=$(kubectl create -f output.yaml -o name)
sleep 5
pod_name=$(kubectl wait --for condition=ready --timeout=10m pod -l job-name=${job_name#job.batch/} -o name)
kubectl logs -f $pod_name --container=train
sleep 5
gcloud artifacts docker images delete $_IMAGE_NAME:$BUILD_ID
exit $(kubectl get $pod_name -o jsonpath={.status.containerStatuses[0].state.terminated.exitCode})
timeout
:
1800s
# 30 minutes
options
:
volumes
:
-
name
:
go-modules
path
:
/go
Keras/keras-cv/cloudbuild/requirements.txt
0 → 100644
View file @
0016b0a7
absl-py
packaging
pandas
tensorflow
tensorflow-datasets
flake8
regex
isort
black
pytest
\ No newline at end of file
Keras/keras-cv/cloudbuild/unit_test_jobs.jsonnet
0 → 100644
View file @
0016b0a7
// Jsonnet template that renders a one-shot Kubernetes job manifest running
// the keras-cv unit tests on a GPU node. Rendered by the `jsonnet` CLI from
// cloudbuild.yaml with -J ml-testing-accelerators supplying the templates.
local base = import 'templates/base.libsonnet';
local gpus = import 'templates/gpus.libsonnet';

// Build-time parameters injected via --ext-str in cloudbuild.yaml.
local image = std.extVar('image');
local tagName = std.extVar('tag_name');
local gcsBucket = std.extVar('gcs_bucket');

local unittest = base.BaseTest {
  // Configure job name.
  frameworkPrefix: "tf",
  modelName: "keras-cv",
  mode: "unit-tests",
  timeout: 3600, # 1 hour, in seconds
  // Set up runtime environment.
  image: image,
  imageTag: tagName,
  accelerator: gpus.teslaT4,
  outputBucket: gcsBucket,
  // The container first builds the custom ops, then runs `command`.
  // NOTE(review): `TEST_CUSTOM_OPS=true` is a plain shell assignment, not an
  // `export` — confirm the test process actually sees it.
  entrypoint: [
    'bash',
    '-c',
    |||
      # Build custom ops from source
      python build_deps/configure.py
      bazel build keras_cv/custom_ops:all --verbose_failures
      cp bazel-bin/keras_cv/custom_ops/*.so keras_cv/custom_ops/
      TEST_CUSTOM_OPS=true
      # Run whatever is in `command` here.
      ${@:0}
    |||
  ],
  command: [
    'pytest',
    'keras_cv',
  ],
};

// Emit the rendered job as YAML (the build step writes it to output.yaml).
std.manifestYamlDoc(unittest.oneshotJob, quote_keys=false)
Keras/keras-cv/examples/layers/object_detection/anchor_generator_configuration.py
0 → 100644
View file @
0016b0a7
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
demo_utils
import
tensorflow
as
tf
from
keras_cv
import
layers
as
cv_layers
def _default_anchor_generator(bounding_box_format):
    """Build an AnchorGenerator with a single minimal anchor configuration.

    Uses one stride (50), one anchor size (100.0), one scale (1.0) and one
    aspect ratio (1.0), with boxes clipped to the image bounds.
    """
    return cv_layers.AnchorGenerator(
        bounding_box_format=bounding_box_format,
        anchor_sizes=[100.0],
        aspect_ratios=[1.0],
        scales=[1.0],
        strides=[50],
        clip_boxes=True,
    )
# Module-level anchor generator shared by `pair_with_anchor_boxes` below;
# boxes are produced in "xywh" format.
generator = _default_anchor_generator(bounding_box_format="xywh")
def pair_with_anchor_boxes(inputs):
    """Attach the module-level generator's anchor boxes to a batch.

    Anchors are computed once from the first image of the batch, then
    tiled across the batch dimension and stored under
    ``inputs["bounding_boxes"]``.
    """
    images = inputs["images"]
    batch_size = tf.shape(images)[0]
    # Anchors are the same for every image, so compute them only once.
    boxes = generator(images[0])[0]
    boxes = tf.expand_dims(boxes, axis=0)
    inputs["bounding_boxes"] = tf.tile(boxes, [batch_size, 1, 1])
    return inputs
if __name__ == "__main__":
    # Demo entry point: load PASCAL VOC with "xywh" boxes, replace each
    # batch's boxes with generated anchor boxes, and visualize the result.
    dataset = demo_utils.load_voc_dataset(bounding_box_format="xywh")
    result = dataset.map(pair_with_anchor_boxes, num_parallel_calls=tf.data.AUTOTUNE)
    demo_utils.visualize_data(result, bounding_box_format="xywh")
Keras/keras-cv/examples/layers/object_detection/demo_utils.py
0 → 100644
View file @
0016b0a7
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for preprocessing demos."""
import
matplotlib.pyplot
as
plt
import
numpy
as
np
import
tensorflow
as
tf
import
tensorflow_datasets
as
tfds
from
keras_cv
import
bounding_box
def preprocess_voc(inputs, format, image_size):
    """Resize a VOC example and convert its boxes to ``format``.

    Returns a dict with "images" and "bounding_boxes" keys.
    """
    image = tf.image.resize(inputs["image"], image_size)
    inputs["image"] = image
    # VOC boxes arrive as relative yxyx; convert to the requested format.
    boxes = bounding_box.convert_format(
        inputs["objects"]["bbox"],
        images=image,
        source="rel_yxyx",
        target=format,
    )
    inputs["objects"]["bbox"] = boxes
    return {"images": image, "bounding_boxes": boxes}
def load_voc_dataset(
    bounding_box_format,
    name="voc/2007",
    batch_size=9,
    image_size=(224, 224),
):
    """Load the VOC train split, preprocess it, and pad-batch it.

    Boxes are converted to ``bounding_box_format``; padded box slots are
    filled with -1.0 so downstream code can recognize them.
    """

    def _prepare(example):
        # Bind the loader's arguments into the per-example map function.
        return preprocess_voc(
            example, format=bounding_box_format, image_size=image_size
        )

    ds = tfds.load(name, split=tfds.Split.TRAIN, shuffle_files=True)
    ds = ds.map(_prepare, num_parallel_calls=tf.data.AUTOTUNE)
    return ds.padded_batch(
        batch_size, padding_values={"images": None, "bounding_boxes": -1.0}
    )
def visualize_data(data, bounding_box_format):
    """Draw boxes on the first batch of ``data`` and display it as a grid."""
    batch = next(iter(data))
    annotated = visualize_bounding_boxes(
        batch["images"], batch["bounding_boxes"], bounding_box_format
    )
    gallery_show(annotated.numpy())
def visualize_bounding_boxes(image, bounding_boxes, bounding_box_format):
    """Render ``bounding_boxes`` onto ``image`` in red."""
    # tf.image.draw_bounding_boxes expects relative yxyx coordinates.
    rel_boxes = bounding_box.convert_format(
        bounding_boxes,
        source=bounding_box_format,
        target="rel_yxyx",
        images=image,
    )
    red = np.array([[255.0, 0.0, 0.0]])
    return tf.image.draw_bounding_boxes(image, rel_boxes, red, name=None)
def gallery_show(images):
    """Display the first nine images in a 3x3 matplotlib grid."""
    images = images.astype(int)
    for index in range(9):
        plt.subplot(3, 3, index + 1)
        plt.imshow(images[index].astype("uint8"))
        plt.axis("off")
    plt.show()
Keras/keras-cv/examples/layers/preprocessing/bounding_box/demo_utils.py
0 → 100644
View file @
0016b0a7
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for preprocessing demos."""
import
matplotlib.pyplot
as
plt
import
numpy
as
np
import
tensorflow
as
tf
import
tensorflow_datasets
as
tfds
from
keras_cv
import
bounding_box
def preprocess_voc(inputs, format, image_size):
    """Resize a VOC example, convert its boxes to ``format`` and add class ids.

    Returns a dict with "images" and "bounding_boxes" keys.
    """
    image = tf.image.resize(inputs["image"], image_size)
    inputs["image"] = image
    # VOC boxes arrive as relative yxyx; convert to the requested format.
    boxes = bounding_box.convert_format(
        inputs["objects"]["bbox"],
        images=image,
        source="rel_yxyx",
        target=format,
    )
    # Append a class-id column via the keras_cv helper.
    boxes = bounding_box.add_class_id(boxes)
    inputs["objects"]["bbox"] = boxes
    return {"images": image, "bounding_boxes": boxes}
def load_voc_dataset(
    bounding_box_format,
    name="voc/2007",
    batch_size=9,
    image_size=(224, 224),
):
    """Load the VOC train split, preprocess it, and pad-batch it.

    Boxes are converted to ``bounding_box_format``; padded box slots are
    filled with -1.0 so downstream code can recognize them.
    """

    def _prepare(example):
        # Bind the loader's arguments into the per-example map function.
        return preprocess_voc(
            example, format=bounding_box_format, image_size=image_size
        )

    ds = tfds.load(name, split=tfds.Split.TRAIN, shuffle_files=True)
    ds = ds.map(_prepare, num_parallel_calls=tf.data.AUTOTUNE)
    return ds.padded_batch(
        batch_size, padding_values={"images": None, "bounding_boxes": -1.0}
    )
def visualize_data(data, bounding_box_format):
    """Draw boxes on the first batch of ``data`` and display it as a grid."""
    batch = next(iter(data))
    annotated = visualize_bounding_boxes(
        batch["images"], batch["bounding_boxes"], bounding_box_format
    )
    gallery_show(annotated.numpy())
def visualize_bounding_boxes(image, bounding_boxes, bounding_box_format):
    """Render ``bounding_boxes`` (coordinate columns only) onto ``image`` in red."""
    # Keep only the four coordinate columns, dropping the trailing class id.
    coords = bounding_boxes[..., :4]
    # tf.image.draw_bounding_boxes expects relative yxyx coordinates.
    rel_boxes = bounding_box.convert_format(
        coords,
        source=bounding_box_format,
        target="rel_yxyx",
        images=image,
    )
    red = np.array([[255.0, 0.0, 0.0]])
    return tf.image.draw_bounding_boxes(image, rel_boxes, red, name=None)
def gallery_show(images):
    """Display the first nine images in a 3x3 matplotlib grid."""
    images = images.astype(int)
    for index in range(9):
        plt.subplot(3, 3, index + 1)
        plt.imshow(images[index].astype("uint8"))
        plt.axis("off")
    plt.show()
Keras/keras-cv/examples/layers/preprocessing/bounding_box/mosaic_demo.py
0 → 100644
View file @
0016b0a7
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
mosaic_demo.py shows how to use the Mosaic preprocessing layer for
object detection.
"""
import
demo_utils
import
tensorflow
as
tf
from
keras_cv.layers
import
preprocessing
# Demo configuration. NOTE(review): these constants are never passed to
# load_voc_dataset() in main(), which therefore uses its own defaults —
# confirm whether they should be wired through or removed.
IMG_SIZE = (256, 256)
BATCH_SIZE = 9
def main():
    """Apply the Mosaic preprocessing layer to VOC batches and visualize them."""
    fmt = "rel_xyxy"
    dataset = demo_utils.load_voc_dataset(bounding_box_format=fmt)
    augment = preprocessing.Mosaic(bounding_box_format=fmt)
    augmented = dataset.map(augment, num_parallel_calls=tf.data.AUTOTUNE)
    demo_utils.visualize_data(augmented, bounding_box_format=fmt)
if __name__ == "__main__":
    # Run the demo only when executed as a script.
    main()
Keras/keras-cv/examples/layers/preprocessing/bounding_box/random_flip_demo.py
0 → 100644
View file @
0016b0a7
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
random_flip_demo.py shows how to use the RandomFlip preprocessing layer for
object detection.
"""
import
demo_utils
import
tensorflow
as
tf
from
keras_cv.layers
import
preprocessing
# Demo configuration. NOTE(review): these constants are never passed to
# load_voc_dataset() in main(), which therefore uses its own defaults —
# confirm whether they should be wired through or removed.
IMG_SIZE = (256, 256)
BATCH_SIZE = 9
def main():
    """Apply the RandomFlip preprocessing layer to VOC batches and visualize them."""
    dataset = demo_utils.load_voc_dataset(bounding_box_format="rel_xyxy")
    # Fixed misleading name: the original called this `random_rotation`
    # (a copy-paste leftover from the rotation demo) although it holds a
    # RandomFlip layer.
    random_flip = preprocessing.RandomFlip(bounding_box_format="rel_xyxy")
    result = dataset.map(random_flip, num_parallel_calls=tf.data.AUTOTUNE)
    demo_utils.visualize_data(result, bounding_box_format="rel_xyxy")
if __name__ == "__main__":
    # Run the demo only when executed as a script.
    main()
Keras/keras-cv/examples/layers/preprocessing/bounding_box/random_rotation_demo.py
0 → 100644
View file @
0016b0a7
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
random_rotation_demo.py shows how to use the RandomRotation preprocessing layer
for object detection.
"""
import
demo_utils
import
tensorflow
as
tf
from
keras_cv.layers
import
preprocessing
# Demo configuration. NOTE(review): these constants are never passed to
# load_voc_dataset() in main(), which therefore uses its own defaults —
# confirm whether they should be wired through or removed.
IMG_SIZE = (256, 256)
BATCH_SIZE = 9
def main():
    """Apply the RandomRotation preprocessing layer to VOC batches and visualize them."""
    fmt = "rel_xyxy"
    dataset = demo_utils.load_voc_dataset(bounding_box_format=fmt)
    rotate = preprocessing.RandomRotation(factor=0.5, bounding_box_format=fmt)
    rotated = dataset.map(rotate, num_parallel_calls=tf.data.AUTOTUNE)
    demo_utils.visualize_data(rotated, bounding_box_format=fmt)
if __name__ == "__main__":
    # Run the demo only when executed as a script.
    main()
Keras/keras-cv/examples/layers/preprocessing/bounding_box/random_shear_demo.py
0 → 100644
View file @
0016b0a7
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
random_shear_demo.py shows how to use the RandomShear preprocessing layer
for object detection.
"""
import
demo_utils
import
tensorflow
as
tf
from
keras_cv.layers
import
preprocessing
# Demo configuration. NOTE(review): these constants are never passed to
# load_voc_dataset() in main(), which therefore uses its own defaults —
# confirm whether they should be wired through or removed.
IMG_SIZE = (256, 256)
BATCH_SIZE = 9
def main():
    """Apply the RandomShear preprocessing layer to VOC batches and visualize them."""
    fmt = "rel_xyxy"
    dataset = demo_utils.load_voc_dataset(bounding_box_format=fmt)
    shear = preprocessing.RandomShear(
        x_factor=(0.1, 0.5),
        y_factor=(0.1, 0.5),
        bounding_box_format=fmt,
    )
    dataset = dataset.map(shear, num_parallel_calls=tf.data.AUTOTUNE)
    demo_utils.visualize_data(dataset, bounding_box_format=fmt)
if __name__ == "__main__":
    # Run the demo only when executed as a script.
    main()
Prev
1
2
3
4
5
6
…
17
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment