OpenDAS / vision · Commits · d0e16b76
"git@developer.sourcefind.cn:OpenDAS/dgl.git" did not exist on "d9c25521bcbdbcaa6d2927ce04df0eeb59bafa99"
Unverified commit d0e16b76, authored Sep 04, 2023 by Philip Meier, committed by GitHub on Sep 04, 2023

allow len 1 sequences for fill with PIL (#7928)

parent 439c5e34
Showing 3 changed files with 4 additions and 42 deletions (+4 -42)

    test/test_transforms_v2_refactored.py      +0 -3
    test/transforms_v2_dispatcher_infos.py     +0 -37
    torchvision/transforms/_functional_pil.py  +4 -2
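In short, the change teaches the PIL fill parsing to broadcast a one-element fill sequence across the image channels, so the v2 kernels accept fill=[v] the same way they accept fill=v. A minimal usage sketch against a torchvision build that includes this commit (illustrative only; the two padded outputs are assumed to match because a length-1 sequence now broadcasts like a scalar):

import PIL.Image
import torchvision.transforms.v2.functional as F

img = PIL.Image.new("RGB", (4, 4))  # multi-channel PIL image

padded_scalar = F.pad(img, padding=2, fill=12)   # scalar fill has always worked
padded_len_1 = F.pad(img, padding=2, fill=[12])  # previously raised ValueError for RGB PIL inputs

# With this commit, [12] is broadcast to (12, 12, 12), so both calls agree.
assert padded_scalar.tobytes() == padded_len_1.tobytes()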
test/test_transforms_v2_refactored.py (+0 -3)

@@ -2581,9 +2581,6 @@ class TestCrop:
         # 2. the fill parameter only has an affect if we need padding
         kwargs["size"] = [s + 4 for s in self.INPUT_SIZE]
 
-        if isinstance(input, PIL.Image.Image) and isinstance(value, (tuple, list)) and len(value) == 1:
-            pytest.xfail("F._pad_image_pil does not support sequences of length 1 for fill.")
-
         if isinstance(input, tv_tensors.Mask) and isinstance(value, (tuple, list)):
             pytest.skip("F.pad_mask doesn't support non-scalar fill.")
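The xfail removed above guarded exactly the combination this commit fixes: a PIL image input together with a one-element sequence for fill. A hedged stand-in for the scenario the TestCrop case now exercises without the escape hatch (the test name, image size, and fill values here are illustrative, not the actual test code):

import PIL.Image
import pytest
import torchvision.transforms.v2.functional as F


@pytest.mark.parametrize("fill", [12, [12], (12,)])
def test_pad_pil_with_len_1_fill(fill):
    # Scalar and length-1 sequence fills should now produce identical padding
    # on a multi-channel PIL image instead of xfailing the PIL path.
    image = PIL.Image.new("RGB", (8, 8))
    expected = F.pad(image, padding=4, fill=12)
    assert F.pad(image, padding=4, fill=fill).tobytes() == expected.tobytes()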
test/transforms_v2_dispatcher_infos.py (+0 -37)

-import collections.abc
-
 import pytest
 import torchvision.transforms.v2.functional as F
 from torchvision import tv_tensors

@@ -112,32 +110,6 @@ multi_crop_skips = [
 multi_crop_skips.append(skip_dispatch_tv_tensor)
 
 
-def xfails_pil(reason, *, condition=None):
-    return [
-        TestMark(("TestDispatchers", test_name), pytest.mark.xfail(reason=reason), condition=condition)
-        for test_name in ["test_dispatch_pil", "test_pil_output_type"]
-    ]
-
-
-def fill_sequence_needs_broadcast(args_kwargs):
-    (image_loader, *_), kwargs = args_kwargs
-    try:
-        fill = kwargs["fill"]
-    except KeyError:
-        return False
-
-    if not isinstance(fill, collections.abc.Sequence) or len(fill) > 1:
-        return False
-
-    return image_loader.num_channels > 1
-
-
-xfails_pil_if_fill_sequence_needs_broadcast = xfails_pil(
-    "PIL kernel doesn't support sequences of length 1 for `fill` if the number of color channels is larger.",
-    condition=fill_sequence_needs_broadcast,
-)
-
-
 DISPATCHER_INFOS = [
     DispatcherInfo(
         F.resized_crop,

@@ -159,14 +131,6 @@ DISPATCHER_INFOS = [
         },
         pil_kernel_info=PILKernelInfo(F._pad_image_pil, kernel_name="pad_image_pil"),
         test_marks=[
-            *xfails_pil(
-                reason=(
-                    "PIL kernel doesn't support sequences of length 1 for argument `fill` and "
-                    "`padding_mode='constant'`, if the number of color channels is larger."
-                ),
-                condition=lambda args_kwargs: fill_sequence_needs_broadcast(args_kwargs)
-                and args_kwargs.kwargs.get("padding_mode", "constant") == "constant",
-            ),
             xfail_jit("F.pad only supports vector fills for list of floats", condition=pad_xfail_jit_fill_condition),
             xfail_jit_python_scalar_arg("padding"),
         ],

@@ -181,7 +145,6 @@ DISPATCHER_INFOS = [
         },
         pil_kernel_info=PILKernelInfo(F._perspective_image_pil),
         test_marks=[
-            *xfails_pil_if_fill_sequence_needs_broadcast,
             xfail_jit_python_scalar_arg("fill"),
         ],
     ),
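The two helpers and the derived xfails_pil_if_fill_sequence_needs_broadcast list existed only to mark PIL dispatcher tests as expected failures when fill was a one-element sequence and the image had more than one channel; with the kernel fixed they have no remaining users. A self-contained restatement of the predicate they encoded (hypothetical helper, shown only to document what the deleted marks keyed on):

import collections.abc


def needs_fill_broadcast(num_channels, kwargs):
    # Mirrors the deleted fill_sequence_needs_broadcast: the PIL kernels used to
    # fail only for a sequence fill of length 1 on an image with several channels.
    fill = kwargs.get("fill")
    if not isinstance(fill, collections.abc.Sequence) or len(fill) > 1:
        return False
    return num_channels > 1


assert needs_fill_broadcast(3, {"fill": [255]})          # the formerly failing case
assert not needs_fill_broadcast(1, {"fill": [255]})      # single channel: nothing to broadcast
assert not needs_fill_broadcast(3, {"fill": [0, 0, 0]})  # full-length fill: always worked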
torchvision/transforms/_functional_pil.py (+4 -2)

@@ -264,11 +264,13 @@ def _parse_fill(
     if isinstance(fill, (int, float)) and num_channels > 1:
         fill = tuple([fill] * num_channels)
     if isinstance(fill, (list, tuple)):
-        if len(fill) != num_channels:
+        if len(fill) == 1:
+            fill = fill * num_channels
+        elif len(fill) != num_channels:
             msg = "The number of elements in 'fill' does not match the number of channels of the image ({} != {})"
             raise ValueError(msg.format(len(fill), num_channels))
 
-        fill = tuple(fill)
+        fill = tuple(fill)  # type: ignore[arg-type]
 
     if img.mode != "F":
         if isinstance(fill, (list, tuple)):
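The behavioral core of the commit is the new len(fill) == 1 branch in _parse_fill: a one-element list or tuple is now multiplied out to num_channels before the existing length check runs. A minimal, self-contained sketch of that rule (hypothetical helper name; the real function also inspects the PIL image mode and does further coercion):

def parse_fill_sketch(fill, num_channels):
    # Broadcast rule after this commit: scalars and length-1 sequences are
    # expanded across the channels; other length mismatches still raise.
    if isinstance(fill, (int, float)) and num_channels > 1:
        fill = tuple([fill] * num_channels)
    if isinstance(fill, (list, tuple)):
        if len(fill) == 1:
            fill = fill * num_channels
        elif len(fill) != num_channels:
            raise ValueError(
                "The number of elements in 'fill' does not match the number of "
                f"channels of the image ({len(fill)} != {num_channels})"
            )
        fill = tuple(fill)
    return fill


assert parse_fill_sketch(12, num_channels=3) == (12, 12, 12)
assert parse_fill_sketch([12], num_channels=3) == (12, 12, 12)  # newly allowed
assert parse_fill_sketch((1, 2, 3), num_channels=3) == (1, 2, 3)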