OpenDAS / vision · Commits · 0e496155

Commit 0e496155 (unverified), authored Jun 30, 2023 by Philip Meier, committed by GitHub on Jun 30, 2023
port affine tests (#7708)
parent 22d981f4
Showing 5 changed files with 453 additions and 416 deletions (+453, −416):

- test/test_transforms_v2.py (+0, −124)
- test/test_transforms_v2_functional.py (+0, −83)
- test/test_transforms_v2_refactored.py (+453, −12)
- test/transforms_v2_dispatcher_infos.py (+0, −15)
- test/transforms_v2_kernel_infos.py (+0, −182)
test/test_transforms_v2.py @ 0e496155

@@ -668,130 +668,6 @@ class TestRandomRotation:
        assert out_img.spatial_size == out_bbox.spatial_size


class TestRandomAffine:
    def test_assertions(self):
        with pytest.raises(ValueError, match="is a single number, it must be positive"):
            transforms.RandomAffine(-0.7)

        for d in [[-0.7], [-0.7, 0, 0.7]]:
            with pytest.raises(ValueError, match="degrees should be a sequence of length 2"):
                transforms.RandomAffine(d)

        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
            transforms.RandomAffine(12, fill="abc")

        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
            transforms.RandomAffine(12, fill="abc")

        for kwargs in [
            {"center": 12},
            {"translate": 12},
            {"scale": 12},
        ]:
            with pytest.raises(TypeError, match="should be a sequence of length"):
                transforms.RandomAffine(12, **kwargs)

        for kwargs in [{"center": [1, 2, 3]}, {"translate": [1, 2, 3]}, {"scale": [1, 2, 3]}]:
            with pytest.raises(ValueError, match="should be a sequence of length"):
                transforms.RandomAffine(12, **kwargs)

        with pytest.raises(ValueError, match="translation values should be between 0 and 1"):
            transforms.RandomAffine(12, translate=[-1.0, 2.0])

        with pytest.raises(ValueError, match="scale values should be positive"):
            transforms.RandomAffine(12, scale=[-1.0, 2.0])

        with pytest.raises(ValueError, match="is a single number, it must be positive"):
            transforms.RandomAffine(12, shear=-10)

        for s in [[-0.7], [-0.7, 0, 0.7]]:
            with pytest.raises(ValueError, match="shear should be a sequence of length 2"):
                transforms.RandomAffine(12, shear=s)

    @pytest.mark.parametrize("degrees", [23, [0, 45], (0, 45)])
    @pytest.mark.parametrize("translate", [None, [0.1, 0.2]])
    @pytest.mark.parametrize("scale", [None, [0.7, 1.2]])
    @pytest.mark.parametrize("shear", [None, 2.0, [5.0, 15.0], [1.0, 2.0, 3.0, 4.0]])
    def test__get_params(self, degrees, translate, scale, shear, mocker):
        image = mocker.MagicMock(spec=datapoints.Image)
        image.num_channels = 3
        image.spatial_size = (24, 32)
        h, w = image.spatial_size

        transform = transforms.RandomAffine(degrees, translate=translate, scale=scale, shear=shear)
        params = transform._get_params([image])

        if not isinstance(degrees, (list, tuple)):
            assert -degrees <= params["angle"] <= degrees
        else:
            assert degrees[0] <= params["angle"] <= degrees[1]

        if translate is not None:
            w_max = int(round(translate[0] * w))
            h_max = int(round(translate[1] * h))
            assert -w_max <= params["translate"][0] <= w_max
            assert -h_max <= params["translate"][1] <= h_max
        else:
            assert params["translate"] == (0, 0)

        if scale is not None:
            assert scale[0] <= params["scale"] <= scale[1]
        else:
            assert params["scale"] == 1.0

        if shear is not None:
            if isinstance(shear, float):
                assert -shear <= params["shear"][0] <= shear
                assert params["shear"][1] == 0.0
            elif len(shear) == 2:
                assert shear[0] <= params["shear"][0] <= shear[1]
                assert params["shear"][1] == 0.0
            else:
                assert shear[0] <= params["shear"][0] <= shear[1]
                assert shear[2] <= params["shear"][1] <= shear[3]
        else:
            assert params["shear"] == (0, 0)

    @pytest.mark.parametrize("degrees", [23, [0, 45], (0, 45)])
    @pytest.mark.parametrize("translate", [None, [0.1, 0.2]])
    @pytest.mark.parametrize("scale", [None, [0.7, 1.2]])
    @pytest.mark.parametrize("shear", [None, 2.0, [5.0, 15.0], [1.0, 2.0, 3.0, 4.0]])
    @pytest.mark.parametrize("fill", [0, [1, 2, 3], (2, 3, 4)])
    @pytest.mark.parametrize("center", [None, [2.0, 3.0]])
    def test__transform(self, degrees, translate, scale, shear, fill, center, mocker):
        interpolation = InterpolationMode.BILINEAR
        transform = transforms.RandomAffine(
            degrees,
            translate=translate,
            scale=scale,
            shear=shear,
            interpolation=interpolation,
            fill=fill,
            center=center,
        )

        if isinstance(degrees, (tuple, list)):
            assert transform.degrees == [float(degrees[0]), float(degrees[1])]
        else:
            assert transform.degrees == [float(-degrees), float(degrees)]

        fn = mocker.patch("torchvision.transforms.v2.functional.affine")
        inpt = mocker.MagicMock(spec=datapoints.Image)
        inpt.num_channels = 3
        inpt.spatial_size = (24, 32)

        # vfdev-5, Feature Request: let's store params as Transform attribute
        # This could be also helpful for users
        # Otherwise, we can mock transform._get_params
        torch.manual_seed(12)
        _ = transform(inpt)
        torch.manual_seed(12)
        params = transform._get_params([inpt])

        fill = transforms._utils._convert_fill_arg(fill)
        fn.assert_called_once_with(inpt, **params, interpolation=interpolation, fill=fill, center=center)


class TestRandomCrop:
    def test_assertions(self):
        with pytest.raises(ValueError, match="Please provide only two dimensions"):
...
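The removed tests above exercise torchvision's v2 RandomAffine transform: its argument validation, the parameter sampling in _get_params, and the dispatch to the functional affine op. For orientation only, here is a minimal usage sketch of that transform with values taken from the parametrizations above; it assumes the public torchvision.transforms.v2 namespace and is not part of this diff.

import torch
from torchvision.transforms import v2 as transforms

# Illustrative configuration mirroring the parametrized test values above.
transform = transforms.RandomAffine(degrees=23, translate=(0.1, 0.2), scale=(0.7, 1.2), shear=2.0)

image = torch.randint(0, 256, (3, 24, 32), dtype=torch.uint8)  # C, H, W
output = transform(image)  # randomly rotated/translated/scaled/sheared, same shape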
test/test_transforms_v2_functional.py @ 0e496155

@@ -665,77 +665,6 @@ def _compute_affine_matrix(angle_, translate_, scale_, shear_, center_):
    return true_matrix


@pytest.mark.parametrize("device", cpu_and_cuda())
def test_correctness_affine_bounding_box_on_fixed_input(device):
    # Check transformation against known expected output
    format = datapoints.BoundingBoxFormat.XYXY
    spatial_size = (64, 64)
    in_boxes = [
        [20, 25, 35, 45],
        [50, 5, 70, 22],
        [spatial_size[1] // 2 - 10, spatial_size[0] // 2 - 10, spatial_size[1] // 2 + 10, spatial_size[0] // 2 + 10],
        [1, 1, 5, 5],
    ]
    in_boxes = torch.tensor(in_boxes, dtype=torch.float64, device=device)
    # Tested parameters
    angle = 63
    scale = 0.89
    dx = 0.12
    dy = 0.23

    # Expected bboxes computed using albumentations:
    # from albumentations.augmentations.geometric.functional import bbox_shift_scale_rotate
    # from albumentations.augmentations.geometric.functional import normalize_bbox, denormalize_bbox
    # expected_bboxes = []
    # for in_box in in_boxes:
    #     n_in_box = normalize_bbox(in_box, *spatial_size)
    #     n_out_box = bbox_shift_scale_rotate(n_in_box, -angle, scale, dx, dy, *spatial_size)
    #     out_box = denormalize_bbox(n_out_box, *spatial_size)
    #     expected_bboxes.append(out_box)
    expected_bboxes = [
        (24.522435977922218, 34.375689508290854, 46.443125279998114, 54.3516575015695),
        (54.88288587110401, 50.08453280875634, 76.44484547743795, 72.81332520036864),
        (27.709526487041554, 34.74952648704156, 51.650473512958435, 58.69047351295844),
        (48.56528888843238, 9.611532109828834, 53.35347829361575, 14.39972151501221),
    ]
    expected_bboxes = clamp_bounding_box(
        datapoints.BoundingBox(expected_bboxes, format="XYXY", spatial_size=spatial_size)
    ).tolist()

    output_boxes = F.affine_bounding_box(
        in_boxes,
        format=format,
        spatial_size=spatial_size,
        angle=angle,
        translate=(dx * spatial_size[1], dy * spatial_size[0]),
        scale=scale,
        shear=(0, 0),
    )

    torch.testing.assert_close(output_boxes.tolist(), expected_bboxes)
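Note on units: judging from the conversion in the call above, F.affine_bounding_box is passed translate in pixels, so the test scales the normalized shifts dx and dy (the albumentations convention used in the comment) by the image width and height. A quick illustrative computation of the values actually passed:

# Illustrative arithmetic only; spatial_size is (height, width) as in the test above.
spatial_size = (64, 64)
dx, dy = 0.12, 0.23
translate_px = (dx * spatial_size[1], dy * spatial_size[0])  # (7.68, 14.72)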
@pytest.mark.parametrize("device", cpu_and_cuda())
def test_correctness_affine_segmentation_mask_on_fixed_input(device):
    # Check transformation against known expected output and CPU/CUDA devices

    # Create a fixed input segmentation mask with 2 square masks
    # in top-left, bottom-left corners
    mask = torch.zeros(1, 32, 32, dtype=torch.long, device=device)
    mask[0, 2:10, 2:10] = 1
    mask[0, 32 - 9 : 32 - 3, 3:9] = 2

    # Rotate 90 degrees and scale
    expected_mask = torch.rot90(mask, k=-1, dims=(-2, -1))
    expected_mask = torch.nn.functional.interpolate(expected_mask[None, :].float(), size=(64, 64), mode="nearest")
    expected_mask = expected_mask[0, :, 16 : 64 - 16, 16 : 64 - 16].long()

    out_mask = F.affine_mask(mask, 90, [0.0, 0.0], 64.0 / 32.0, [0.0, 0.0])

    torch.testing.assert_close(out_mask, expected_mask)
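The expected mask above is assembled by hand: rotate the input by 90 degrees, upscale it by a factor of two with nearest-neighbor interpolation, then keep the central window so the result matches the original 32x32 extent. A small illustrative check of the crop bounds used there:

# Illustrative arithmetic for the expected-mask construction above.
in_size = 32
scale = 64.0 / 32.0                # the scale passed to F.affine_mask
scaled = int(in_size * scale)      # 64, the size given to interpolate(...)
offset = (scaled - in_size) // 2   # 16, hence the [..., 16:64 - 16, 16:64 - 16] crop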
@pytest.mark.parametrize("angle", range(-90, 90, 56))
@pytest.mark.parametrize("expand, center", [(True, None), (False, None), (False, (12, 14))])
def test_correctness_rotate_bounding_box(angle, expand, center):
...
@@ -950,18 +879,6 @@ def test_correctness_crop_bounding_box(device, format, top, left, height, width,
    torch.testing.assert_close(output_spatial_size, spatial_size)


@pytest.mark.parametrize("device", cpu_and_cuda())
def test_correctness_horizontal_flip_segmentation_mask_on_fixed_input(device):
    mask = torch.zeros((3, 3, 3), dtype=torch.long, device=device)
    mask[:, :, 0] = 1

    out_mask = F.horizontal_flip_mask(mask)

    expected_mask = torch.zeros((3, 3, 3), dtype=torch.long, device=device)
    expected_mask[:, :, -1] = 1
    torch.testing.assert_close(out_mask, expected_mask)


@pytest.mark.parametrize("device", cpu_and_cuda())
def test_correctness_vertical_flip_segmentation_mask_on_fixed_input(device):
    mask = torch.zeros((3, 3, 3), dtype=torch.long, device=device)
...
test/test_transforms_v2_refactored.py @ 0e496155

(This diff is collapsed.)
test/transforms_v2_dispatcher_infos.py @ 0e496155

@@ -138,21 +138,6 @@ xfails_pil_if_fill_sequence_needs_broadcast = xfails_pil(
DISPATCHER_INFOS = [
    DispatcherInfo(
        F.affine,
        kernels={
            datapoints.Image: F.affine_image_tensor,
            datapoints.Video: F.affine_video,
            datapoints.BoundingBox: F.affine_bounding_box,
            datapoints.Mask: F.affine_mask,
        },
        pil_kernel_info=PILKernelInfo(F.affine_image_pil),
        test_marks=[
            *xfails_pil_if_fill_sequence_needs_broadcast,
            xfail_jit_python_scalar_arg("shear"),
            xfail_jit_python_scalar_arg("fill"),
        ],
    ),
    DispatcherInfo(
        F.vertical_flip,
        kernels={
...
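The kernels mapping in the removed DispatcherInfo above records which low-level kernel the F.affine dispatcher should reach for each datapoint type. Purely as a hypothetical illustration of what that mapping expresses (this is not torchvision's actual dispatch code, and it ignores the extra metadata some kernels need):

from torchvision import datapoints
from torchvision.transforms.v2 import functional as F

# Hypothetical sketch only: look up the kernel by input type and call it.
_AFFINE_KERNELS = {
    datapoints.Image: F.affine_image_tensor,
    datapoints.Video: F.affine_video,
    datapoints.BoundingBox: F.affine_bounding_box,
    datapoints.Mask: F.affine_mask,
}

def dispatch_affine(inpt, **kwargs):
    return _AFFINE_KERNELS[type(inpt)](inpt, **kwargs)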
test/transforms_v2_kernel_infos.py @ 0e496155

import decimal
import functools
import itertools
import math

import numpy as np
import PIL.Image
...
@@ -156,46 +155,6 @@ def xfail_jit_python_scalar_arg(name, *, reason=None):
KERNEL_INFOS = []


_AFFINE_KWARGS = combinations_grid(
    angle=[-87, 15, 90],
    translate=[(5, 5), (-5, -5)],
    scale=[0.77, 1.27],
    shear=[(12, 12), (0, 0)],
)


def _diversify_affine_kwargs_types(affine_kwargs):
    angle = affine_kwargs["angle"]
    for diverse_angle in [int(angle), float(angle)]:
        yield dict(affine_kwargs, angle=diverse_angle)

    shear = affine_kwargs["shear"]
    for diverse_shear in [tuple(shear), list(shear), int(shear[0]), float(shear[0])]:
        yield dict(affine_kwargs, shear=diverse_shear)


def _full_affine_params(**partial_params):
    partial_params.setdefault("angle", 0.0)
    partial_params.setdefault("translate", [0.0, 0.0])
    partial_params.setdefault("scale", 1.0)
    partial_params.setdefault("shear", [0.0, 0.0])
    partial_params.setdefault("center", None)
    return partial_params


_DIVERSE_AFFINE_PARAMS = [
    _full_affine_params(**{name: arg})
    for name, args in [
        ("angle", [1.0, 2]),
        ("translate", [[1.0, 0.5], [1, 2], (1.0, 0.5), (1, 2)]),
        ("scale", [0.5]),
        ("shear", [1.0, 2, [1.0], [2], (1.0,), (2,), [1.0, 0.5], [1, 2], (1.0, 0.5), (1, 2)]),
        ("center", [None, [1.0, 0.5], [1, 2], (1.0, 0.5), (1, 2)]),
    ]
    for arg in args
]


def get_fills(*, num_channels, dtype):
    yield None
...
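_AFFINE_KWARGS above is built with combinations_grid, which, as used here, appears to produce one kwargs dict per element of the Cartesian product of the given value lists. A minimal sketch of that behavior, under that assumption and with a hypothetical helper name:

import itertools

def combinations_grid_sketch(**kwargs):
    # Assumed behavior: one dict for every combination of the given values.
    keys = list(kwargs)
    return [dict(zip(keys, values)) for values in itertools.product(*kwargs.values())]

# For the values above: 3 angles x 2 translates x 2 scales x 2 shears = 24 parameter sets.
grid = combinations_grid_sketch(
    angle=[-87, 15, 90],
    translate=[(5, 5), (-5, -5)],
    scale=[0.77, 1.27],
    shear=[(12, 12), (0, 0)],
)
assert len(grid) == 24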
@@ -226,72 +185,6 @@ def float32_vs_uint8_fill_adapter(other_args, kwargs):
    return other_args, dict(kwargs, fill=fill)


def sample_inputs_affine_image_tensor():
    make_affine_image_loaders = functools.partial(
        make_image_loaders, sizes=["random"], color_spaces=["RGB"], dtypes=[torch.float32]
    )

    for image_loader, affine_params in itertools.product(make_affine_image_loaders(), _DIVERSE_AFFINE_PARAMS):
        yield ArgsKwargs(image_loader, **affine_params)

    for image_loader in make_affine_image_loaders():
        for fill in get_fills(num_channels=image_loader.num_channels, dtype=image_loader.dtype):
            yield ArgsKwargs(image_loader, **_full_affine_params(), fill=fill)

    for image_loader, interpolation in itertools.product(
        make_affine_image_loaders(),
        [
            F.InterpolationMode.NEAREST,
            F.InterpolationMode.BILINEAR,
        ],
    ):
        yield ArgsKwargs(image_loader, **_full_affine_params(), fill=0)


def reference_inputs_affine_image_tensor():
    for image_loader, affine_kwargs in itertools.product(make_image_loaders_for_interpolation(), _AFFINE_KWARGS):
        yield ArgsKwargs(
            image_loader,
            interpolation=F.InterpolationMode.NEAREST,
            **affine_kwargs,
        )


def sample_inputs_affine_bounding_box():
    for bounding_box_loader, affine_params in itertools.product(
        make_bounding_box_loaders(formats=[datapoints.BoundingBoxFormat.XYXY]), _DIVERSE_AFFINE_PARAMS
    ):
        yield ArgsKwargs(
            bounding_box_loader,
            format=bounding_box_loader.format,
            spatial_size=bounding_box_loader.spatial_size,
            **affine_params,
        )


def _compute_affine_matrix(angle, translate, scale, shear, center):
    rot = math.radians(angle)
    cx, cy = center
    tx, ty = translate
    sx, sy = [math.radians(sh_) for sh_ in shear]

    c_matrix = np.array([[1, 0, cx], [0, 1, cy], [0, 0, 1]])
    t_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
    c_matrix_inv = np.linalg.inv(c_matrix)
    rs_matrix = np.array(
        [
            [scale * math.cos(rot), -scale * math.sin(rot), 0],
            [scale * math.sin(rot), scale * math.cos(rot), 0],
            [0, 0, 1],
        ]
    )
    shear_x_matrix = np.array([[1, -math.tan(sx), 0], [0, 1, 0], [0, 0, 1]])
    shear_y_matrix = np.array([[1, 0, 0], [-math.tan(sy), 1, 0], [0, 0, 1]])
    rss_matrix = np.matmul(rs_matrix, np.matmul(shear_y_matrix, shear_x_matrix))
    true_matrix = np.matmul(t_matrix, np.matmul(c_matrix, np.matmul(rss_matrix, c_matrix_inv)))
    return true_matrix


def reference_affine_bounding_box_helper(bounding_box, *, format, spatial_size, affine_matrix):
    def transform(bbox, affine_matrix_, format_, spatial_size_):
        # Go to float before converting to prevent precision loss in case of CXCYWH -> XYXY and W or H is 1
...
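The reference matrix removed above composes the affine parameters as translation, re-centering, rotation-plus-scale, and the two shears: T @ C @ (R·S @ Shear_y @ Shear_x) @ C^-1. A small sanity check, purely illustrative and assuming the _compute_affine_matrix definition shown above is in scope: with neutral parameters the composition collapses to the identity regardless of the chosen center.

import numpy as np

# Illustrative check: angle 0, no translation, scale 1 and no shear give the 3x3 identity
# (center chosen as the middle of a 32x32 image).
matrix = _compute_affine_matrix(angle=0.0, translate=(0, 0), scale=1.0, shear=(0.0, 0.0), center=(16.0, 16.0))
assert np.allclose(matrix, np.eye(3))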
@@ -342,81 +235,6 @@ def reference_affine_bounding_box_helper(bounding_box, *, format, spatial_size,
    return expected_bboxes


def reference_affine_bounding_box(bounding_box, *, format, spatial_size, angle, translate, scale, shear, center=None):
    if center is None:
        center = [s * 0.5 for s in spatial_size[::-1]]

    affine_matrix = _compute_affine_matrix(angle, translate, scale, shear, center)
    affine_matrix = affine_matrix[:2, :]

    expected_bboxes = reference_affine_bounding_box_helper(
        bounding_box, format=format, spatial_size=spatial_size, affine_matrix=affine_matrix
    )

    return expected_bboxes


def reference_inputs_affine_bounding_box():
    for bounding_box_loader, affine_kwargs in itertools.product(
        make_bounding_box_loaders(extra_dims=[()]),
        _AFFINE_KWARGS,
    ):
        yield ArgsKwargs(
            bounding_box_loader,
            format=bounding_box_loader.format,
            spatial_size=bounding_box_loader.spatial_size,
            **affine_kwargs,
        )


def sample_inputs_affine_mask():
    for mask_loader in make_mask_loaders(sizes=["random"], num_categories=["random"], num_objects=["random"]):
        yield ArgsKwargs(mask_loader, **_full_affine_params())


def sample_inputs_affine_video():
    for video_loader in make_video_loaders(sizes=["random"], num_frames=["random"]):
        yield ArgsKwargs(video_loader, **_full_affine_params())


KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.affine_image_tensor,
            sample_inputs_fn=sample_inputs_affine_image_tensor,
            reference_fn=pil_reference_wrapper(F.affine_image_pil),
            reference_inputs_fn=reference_inputs_affine_image_tensor,
            float32_vs_uint8=True,
            closeness_kwargs=pil_reference_pixel_difference(10, mae=True),
            test_marks=[
                xfail_jit_python_scalar_arg("shear"),
                xfail_jit_python_scalar_arg("fill"),
            ],
        ),
        KernelInfo(
            F.affine_bounding_box,
            sample_inputs_fn=sample_inputs_affine_bounding_box,
            reference_fn=reference_affine_bounding_box,
            reference_inputs_fn=reference_inputs_affine_bounding_box,
            test_marks=[
                xfail_jit_python_scalar_arg("shear"),
            ],
        ),
        KernelInfo(
            F.affine_mask,
            sample_inputs_fn=sample_inputs_affine_mask,
            test_marks=[
                xfail_jit_python_scalar_arg("shear"),
            ],
        ),
        KernelInfo(
            F.affine_video,
            sample_inputs_fn=sample_inputs_affine_video,
        ),
    ]
)


def sample_inputs_convert_format_bounding_box():
    formats = list(datapoints.BoundingBoxFormat)
    for bounding_box_loader, new_format in itertools.product(make_bounding_box_loaders(formats=formats), formats):
...