OpenDAS / vision · Commits

Commit 55d3ba62 (unverified)
Authored Feb 16, 2023 by Nicolas Hug; committed via GitHub on Feb 16, 2023
Parent: 3991ab99

Deprecate functional_pil and functional_tensor and make them private (#7269)
Showing 13 changed files with 1383 additions and 1361 deletions.
test/prototype_common_utils.py                        +1   −1
test/prototype_transforms_kernel_infos.py             +1   −1
test/test_functional_tensor.py                        +2   −2
test/test_transforms.py                               +1   −1
torchvision/transforms/_functional_pil.py             +391 −0
torchvision/transforms/_functional_tensor.py          +962 −0
torchvision/transforms/functional.py                  +1   −1
torchvision/transforms/functional_pil.py              +9   −389
torchvision/transforms/functional_tensor.py           +8   −959
torchvision/transforms/v2/_auto_augment.py            +1   −1
torchvision/transforms/v2/functional/_color.py        +2   −2
torchvision/transforms/v2/functional/_geometry.py     +2   −2
torchvision/transforms/v2/functional/_meta.py         +2   −2
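For downstream code, the practical impact is a one-character module rename (a leading underscore). An illustrative sketch, not part of the diff, using the _max_value helper that the test updates below rename:

import torch

# Before this commit (now deprecated; removal planned for 0.17):
#   from torchvision.transforms.functional_tensor import _max_value
# After (the private module this commit introduces):
from torchvision.transforms._functional_tensor import _max_value

print(_max_value(torch.uint8))  # 255: the maximum value of an 8-bit image dtype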
test/prototype_common_utils.py

@@ -17,7 +17,7 @@ from datasets_utils import combinations_grid
 from torch.nn.functional import one_hot
 from torch.testing._comparison import BooleanPair, NonePair, not_close_error_metas, NumberPair, TensorLikePair
 from torchvision import datapoints
-from torchvision.transforms.functional_tensor import _max_value as get_max_value
+from torchvision.transforms._functional_tensor import _max_value as get_max_value
 from torchvision.transforms.v2.functional import convert_dtype_image_tensor, to_image_tensor

 __all__ = [
test/prototype_transforms_kernel_infos.py

@@ -29,7 +29,7 @@ from prototype_common_utils import (
 )
 from torch.utils._pytree import tree_map
 from torchvision import datapoints
-from torchvision.transforms.functional_tensor import _max_value as get_max_value, _parse_pad_padding
+from torchvision.transforms._functional_tensor import _max_value as get_max_value, _parse_pad_padding

 __all__ = ["KernelInfo", "KERNEL_INFOS"]
test/test_functional_tensor.py

@@ -11,9 +11,9 @@ import PIL.Image
 import pytest
 import torch
 import torchvision.transforms as T
+import torchvision.transforms._functional_pil as F_pil
+import torchvision.transforms._functional_tensor as F_t
 import torchvision.transforms.functional as F
-import torchvision.transforms.functional_pil as F_pil
-import torchvision.transforms.functional_tensor as F_t
 from common_utils import (
     _assert_approx_equal_tensor_to_pil,
     _assert_equal_tensor_to_pil,
test/test_transforms.py

@@ -9,8 +9,8 @@ import numpy as np
 import pytest
 import torch
 import torchvision.transforms as transforms
+import torchvision.transforms._functional_tensor as F_t
 import torchvision.transforms.functional as F
-import torchvision.transforms.functional_tensor as F_t
 from PIL import Image
 from torch._utils_internal import get_file_path_2
torchvision/transforms/_functional_pil.py  0 → 100644 (new file; full contents below)

import numbers
from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Union

import numpy as np
import torch
from PIL import Image, ImageEnhance, ImageOps

try:
    import accimage
except ImportError:
    accimage = None


@torch.jit.unused
def _is_pil_image(img: Any) -> bool:
    if accimage is not None:
        return isinstance(img, (Image.Image, accimage.Image))
    else:
        return isinstance(img, Image.Image)


@torch.jit.unused
def get_dimensions(img: Any) -> List[int]:
    if _is_pil_image(img):
        if hasattr(img, "getbands"):
            channels = len(img.getbands())
        else:
            channels = img.channels
        width, height = img.size
        return [channels, height, width]
    raise TypeError(f"Unexpected type {type(img)}")


@torch.jit.unused
def get_image_size(img: Any) -> List[int]:
    if _is_pil_image(img):
        return list(img.size)
    raise TypeError(f"Unexpected type {type(img)}")


@torch.jit.unused
def get_image_num_channels(img: Any) -> int:
    if _is_pil_image(img):
        if hasattr(img, "getbands"):
            return len(img.getbands())
        else:
            return img.channels
    raise TypeError(f"Unexpected type {type(img)}")


@torch.jit.unused
def hflip(img: Image.Image) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    return img.transpose(Image.FLIP_LEFT_RIGHT)


@torch.jit.unused
def vflip(img: Image.Image) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    return img.transpose(Image.FLIP_TOP_BOTTOM)


@torch.jit.unused
def adjust_brightness(img: Image.Image, brightness_factor: float) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    enhancer = ImageEnhance.Brightness(img)
    img = enhancer.enhance(brightness_factor)
    return img


@torch.jit.unused
def adjust_contrast(img: Image.Image, contrast_factor: float) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    enhancer = ImageEnhance.Contrast(img)
    img = enhancer.enhance(contrast_factor)
    return img


@torch.jit.unused
def adjust_saturation(img: Image.Image, saturation_factor: float) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    enhancer = ImageEnhance.Color(img)
    img = enhancer.enhance(saturation_factor)
    return img


@torch.jit.unused
def adjust_hue(img: Image.Image, hue_factor: float) -> Image.Image:
    if not (-0.5 <= hue_factor <= 0.5):
        raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].")

    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    input_mode = img.mode
    if input_mode in {"L", "1", "I", "F"}:
        return img

    h, s, v = img.convert("HSV").split()

    np_h = np.array(h, dtype=np.uint8)
    # uint8 addition take cares of rotation across boundaries
    with np.errstate(over="ignore"):
        np_h += np.uint8(hue_factor * 255)
    h = Image.fromarray(np_h, "L")

    img = Image.merge("HSV", (h, s, v)).convert(input_mode)
    return img


@torch.jit.unused
def adjust_gamma(
    img: Image.Image,
    gamma: float,
    gain: float = 1.0,
) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    if gamma < 0:
        raise ValueError("Gamma should be a non-negative real number")

    input_mode = img.mode
    img = img.convert("RGB")
    gamma_map = [int((255 + 1 - 1e-3) * gain * pow(ele / 255.0, gamma)) for ele in range(256)] * 3
    img = img.point(gamma_map)  # use PIL's point-function to accelerate this part

    img = img.convert(input_mode)
    return img


@torch.jit.unused
def pad(
    img: Image.Image,
    padding: Union[int, List[int], Tuple[int, ...]],
    fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
    padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    if not isinstance(padding, (numbers.Number, tuple, list)):
        raise TypeError("Got inappropriate padding arg")
    if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
        raise TypeError("Got inappropriate fill arg")
    if not isinstance(padding_mode, str):
        raise TypeError("Got inappropriate padding_mode arg")

    if isinstance(padding, list):
        padding = tuple(padding)

    if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]:
        raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")

    if isinstance(padding, tuple) and len(padding) == 1:
        # Compatibility with `functional_tensor.pad`
        padding = padding[0]

    if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
        raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")

    if padding_mode == "constant":
        opts = _parse_fill(fill, img, name="fill")
        if img.mode == "P":
            palette = img.getpalette()
            image = ImageOps.expand(img, border=padding, **opts)
            image.putpalette(palette)
            return image

        return ImageOps.expand(img, border=padding, **opts)
    else:
        if isinstance(padding, int):
            pad_left = pad_right = pad_top = pad_bottom = padding
        if isinstance(padding, tuple) and len(padding) == 2:
            pad_left = pad_right = padding[0]
            pad_top = pad_bottom = padding[1]
        if isinstance(padding, tuple) and len(padding) == 4:
            pad_left = padding[0]
            pad_top = padding[1]
            pad_right = padding[2]
            pad_bottom = padding[3]

        p = [pad_left, pad_top, pad_right, pad_bottom]
        cropping = -np.minimum(p, 0)

        if cropping.any():
            crop_left, crop_top, crop_right, crop_bottom = cropping
            img = img.crop((crop_left, crop_top, img.width - crop_right, img.height - crop_bottom))

        pad_left, pad_top, pad_right, pad_bottom = np.maximum(p, 0)

        if img.mode == "P":
            palette = img.getpalette()
            img = np.asarray(img)
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), mode=padding_mode)
            img = Image.fromarray(img)
            img.putpalette(palette)
            return img

        img = np.asarray(img)
        # RGB image
        if len(img.shape) == 3:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
        # Grayscale image
        if len(img.shape) == 2:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)

        return Image.fromarray(img)


@torch.jit.unused
def crop(
    img: Image.Image,
    top: int,
    left: int,
    height: int,
    width: int,
) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    return img.crop((left, top, left + width, top + height))


@torch.jit.unused
def resize(
    img: Image.Image,
    size: Union[List[int], int],
    interpolation: int = Image.BILINEAR,
) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    if not (isinstance(size, list) and len(size) == 2):
        raise TypeError(f"Got inappropriate size arg: {size}")

    return img.resize(tuple(size[::-1]), interpolation)


@torch.jit.unused
def _parse_fill(
    fill: Optional[Union[float, List[float], Tuple[float, ...]]],
    img: Image.Image,
    name: str = "fillcolor",
) -> Dict[str, Optional[Union[float, List[float], Tuple[float, ...]]]]:
    # Process fill color for affine transforms
    num_channels = get_image_num_channels(img)
    if fill is None:
        fill = 0
    if isinstance(fill, (int, float)) and num_channels > 1:
        fill = tuple([fill] * num_channels)
    if isinstance(fill, (list, tuple)):
        if len(fill) != num_channels:
            msg = "The number of elements in 'fill' does not match the number of channels of the image ({} != {})"
            raise ValueError(msg.format(len(fill), num_channels))

        fill = tuple(fill)

    if img.mode != "F":
        if isinstance(fill, (list, tuple)):
            fill = tuple(int(x) for x in fill)
        else:
            fill = int(fill)

    return {name: fill}


@torch.jit.unused
def affine(
    img: Image.Image,
    matrix: List[float],
    interpolation: int = Image.NEAREST,
    fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    output_size = img.size
    opts = _parse_fill(fill, img)
    return img.transform(output_size, Image.AFFINE, matrix, interpolation, **opts)


@torch.jit.unused
def rotate(
    img: Image.Image,
    angle: float,
    interpolation: int = Image.NEAREST,
    expand: bool = False,
    center: Optional[Tuple[int, int]] = None,
    fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    opts = _parse_fill(fill, img)
    return img.rotate(angle, interpolation, expand, center, **opts)


@torch.jit.unused
def perspective(
    img: Image.Image,
    perspective_coeffs: List[float],
    interpolation: int = Image.BICUBIC,
    fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    opts = _parse_fill(fill, img)

    return img.transform(img.size, Image.PERSPECTIVE, perspective_coeffs, interpolation, **opts)


@torch.jit.unused
def to_grayscale(img: Image.Image, num_output_channels: int) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    if num_output_channels == 1:
        img = img.convert("L")
    elif num_output_channels == 3:
        img = img.convert("L")
        np_img = np.array(img, dtype=np.uint8)
        np_img = np.dstack([np_img, np_img, np_img])
        img = Image.fromarray(np_img, "RGB")
    else:
        raise ValueError("num_output_channels should be either 1 or 3")

    return img


@torch.jit.unused
def invert(img: Image.Image) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    return ImageOps.invert(img)


@torch.jit.unused
def posterize(img: Image.Image, bits: int) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    return ImageOps.posterize(img, bits)


@torch.jit.unused
def solarize(img: Image.Image, threshold: int) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    return ImageOps.solarize(img, threshold)


@torch.jit.unused
def adjust_sharpness(img: Image.Image, sharpness_factor: float) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    enhancer = ImageEnhance.Sharpness(img)
    img = enhancer.enhance(sharpness_factor)
    return img


@torch.jit.unused
def autocontrast(img: Image.Image) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    return ImageOps.autocontrast(img)


@torch.jit.unused
def equalize(img: Image.Image) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    return ImageOps.equalize(img)
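For context (not part of the commit): user code is expected to reach these kernels through the public torchvision.transforms.functional API, which dispatches on input type. A minimal sketch of the PIL path, assuming Pillow is installed:

from PIL import Image
import torchvision.transforms.functional as F

img = Image.new("RGB", (32, 24))
# For PIL inputs, F.pad forwards to the private pad() kernel above;
# a two-element padding pads left/right by 2 and top/bottom by 4.
out = F.pad(img, padding=[2, 4], fill=0, padding_mode="constant")
print(out.size)  # (36, 32)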
torchvision/transforms/_functional_tensor.py  0 → 100644

(New 962-line file; the diff is collapsed in this view. It holds the tensor kernels, the private counterpart of _functional_pil.py above.)
torchvision/transforms/functional.py

@@ -15,7 +15,7 @@ except ImportError:
 accimage = None

 from ..utils import _log_api_usage_once
-from . import functional_pil as F_pil, functional_tensor as F_t
+from . import _functional_pil as F_pil, _functional_tensor as F_t


 class InterpolationMode(Enum):
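An illustrative check (not from the commit) that public callers are unaffected: only the internal F_pil/F_t aliases were rewired, so the public entry points behave exactly as before.

import torch
import torchvision.transforms.functional as F

t = torch.zeros(3, 4, 6, dtype=torch.uint8)
# F.hflip routes tensors to the (now private) tensor kernel and PIL images
# to the PIL kernel; callers only ever touch the public API.
assert F.hflip(t).shape == (3, 4, 6)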
torchvision/transforms/functional_pil.py

The entire module body (the same 389 lines of functions that now live in torchvision/transforms/_functional_pil.py, shown above) is deleted. What remains is a deprecation shim that re-exports the private module and warns on import:

import warnings

from torchvision.transforms._functional_pil import *  # noqa

warnings.warn(
    "The torchvision.transforms.functional_pil module is deprecated "
    "in 0.15 and will be **removed in 0.17**. Please don't rely on it. "
    "You probably just need to use APIs in "
    "torchvision.transforms.functional or in "
    "torchvision.transforms.v2.functional."
)
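An illustrative way (not part of the commit) to observe the shim firing, assuming the deprecated module has not already been imported earlier in the process (module caching means the warning is emitted only on first import):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    import torchvision.transforms.functional_pil  # noqa: F401  (the deprecated shim)

assert any("deprecated" in str(w.message) for w in caught)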
torchvision/transforms/functional_tensor.py

(This diff is collapsed in the page view. Per the +8 −959 summary above, it presumably mirrors functional_pil.py: the module body moves to _functional_tensor.py and only a deprecation shim remains.)
torchvision/transforms/v2/_auto_augment.py

@@ -6,7 +6,7 @@ import torch
 from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec
 from torchvision import datapoints, transforms as _transforms
-from torchvision.transforms import functional_tensor as _FT
+from torchvision.transforms import _functional_tensor as _FT
 from torchvision.transforms.v2 import AutoAugmentPolicy, functional as F, InterpolationMode, Transform
 from torchvision.transforms.v2.functional._geometry import _check_interpolation
 from torchvision.transforms.v2.functional._meta import get_spatial_size
torchvision/transforms/v2/functional/_color.py

@@ -4,8 +4,8 @@ import PIL.Image
 import torch
 from torch.nn.functional import conv2d
 from torchvision import datapoints
-from torchvision.transforms import functional_pil as _FP
+from torchvision.transforms import _functional_pil as _FP
-from torchvision.transforms.functional_tensor import _max_value
+from torchvision.transforms._functional_tensor import _max_value
 from torchvision.utils import _log_api_usage_once
torchvision/transforms/v2/functional/_geometry.py

@@ -8,7 +8,8 @@ import torch
 from torch.nn.functional import grid_sample, interpolate, pad as torch_pad
 from torchvision import datapoints
-from torchvision.transforms import functional_pil as _FP
+from torchvision.transforms import _functional_pil as _FP
+from torchvision.transforms._functional_tensor import _pad_symmetric
 from torchvision.transforms.functional import (
     _check_antialias,
     _compute_resized_output_size as __compute_resized_output_size,

@@ -19,7 +20,6 @@ from torchvision.transforms.functional import (
     pil_to_tensor,
     to_pil_image,
 )
-from torchvision.transforms.functional_tensor import _pad_symmetric
 from torchvision.utils import _log_api_usage_once
torchvision/transforms/v2/functional/_meta.py

@@ -4,8 +4,8 @@ import PIL.Image
 import torch
 from torchvision import datapoints
 from torchvision.datapoints import BoundingBoxFormat
-from torchvision.transforms import functional_pil as _FP
+from torchvision.transforms import _functional_pil as _FP
-from torchvision.transforms.functional_tensor import _max_value
+from torchvision.transforms._functional_tensor import _max_value
 from torchvision.utils import _log_api_usage_once