"...git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "0ea78f9707f3ccb0fe3eaa74f247e4d3d8f47b6b"
Unverified commit 423ddcd0, authored by kylematoba, committed by GitHub

Update transforms for PIL deprecation (#5898)



* Update transforms for PIL deprecation

* Changes agreed at pytorch/vision#5898

* black, sort constants, version check

* Format tests

* Square brackets

* Update torchvision/transforms/_pil_constants.py
Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
Co-authored-by: Philip Meier <github.pmeier@posteo.de>
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent 4c02f103
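For context: this commit tracks the Pillow 9.1 deprecations (see the release notes linked in the new file below), which moved the module-level resampling, transform, and transpose constants into the Image.Resampling, Image.Transform, and Image.Transpose enums. A minimal sketch of the two spellings, assuming Pillow >= 9.1 (not part of the diff):

# Sketch, assuming Pillow >= 9.1: the enum spellings that replace the
# deprecated module-level constants this commit migrates away from.
from PIL import Image

img = Image.new("RGB", (64, 64))

img.resize((32, 32), Image.Resampling.BILINEAR)   # was Image.BILINEAR
img.transpose(Image.Transpose.FLIP_LEFT_RIGHT)    # was Image.FLIP_LEFT_RIGHT
img.transform((32, 32), Image.Transform.AFFINE, (1, 0, 0, 0, 1, 0))  # was Image.AFFINE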
@@ -412,12 +412,13 @@ class TestONNXExporter:
     def get_image(self, rel_path: str, size: Tuple[int, int]) -> torch.Tensor:
         import os

+        import torchvision.transforms._pil_constants as _pil_constants
         from PIL import Image
         from torchvision.transforms import functional as F

         data_dir = os.path.join(os.path.dirname(__file__), "assets")
         path = os.path.join(data_dir, *rel_path.split("/"))
-        image = Image.open(path).convert("RGB").resize(size, Image.BILINEAR)
+        image = Image.open(path).convert("RGB").resize(size, _pil_constants.BILINEAR)

         return F.convert_image_dtype(F.pil_to_tensor(image))
...
@@ -8,6 +8,7 @@ import numpy as np
 import pytest
 import torch
 import torchvision.transforms as transforms
+import torchvision.transforms._pil_constants as _pil_constants
 import torchvision.transforms.functional as F
 import torchvision.transforms.functional_tensor as F_t
 from PIL import Image
@@ -173,7 +174,7 @@ class TestAccImage:
     def test_accimage_resize(self):
         trans = transforms.Compose(
             [
-                transforms.Resize(256, interpolation=Image.LINEAR),
+                transforms.Resize(256, interpolation=_pil_constants.LINEAR),
                 transforms.PILToTensor(),
                 transforms.ConvertImageDtype(dtype=torch.float),
             ]
...
@@ -4,6 +4,7 @@ import sys
 import numpy as np
 import pytest
 import torch
+import torchvision.transforms._pil_constants as _pil_constants
 from common_utils import (
     get_tmp_dir,
     int_dtypes,
@@ -15,7 +16,6 @@ from common_utils import (
     cpu_and_gpu,
     assert_equal,
 )
-from PIL import Image
 from torchvision import transforms as T
 from torchvision.transforms import InterpolationMode
 from torchvision.transforms import functional as F
@@ -771,13 +771,13 @@ def test_autoaugment__op_apply_shear(interpolation, mode):
             matrix = (1, level, 0, 0, 1, 0)
         elif mode == "Y":
             matrix = (1, 0, 0, level, 1, 0)
-        return pil_img.transform((image_size, image_size), Image.AFFINE, matrix, resample=resample)
+        return pil_img.transform((image_size, image_size), _pil_constants.AFFINE, matrix, resample=resample)

     t_img, pil_img = _create_data(image_size, image_size)

     resample_pil = {
-        F.InterpolationMode.NEAREST: Image.NEAREST,
-        F.InterpolationMode.BILINEAR: Image.BILINEAR,
+        F.InterpolationMode.NEAREST: _pil_constants.NEAREST,
+        F.InterpolationMode.BILINEAR: _pil_constants.BILINEAR,
     }[interpolation]

     level = 0.3
...
New file torchvision/transforms/_pil_constants.py (named in the commit message above):

+import PIL
+from PIL import Image
+
+# See https://pillow.readthedocs.io/en/stable/releasenotes/9.1.0.html#deprecations
+# TODO: Remove this file once PIL minimal version is >= 9.1
+if tuple(int(part) for part in PIL.__version__.split(".")) >= (9, 1):
+    BICUBIC = Image.Resampling.BICUBIC
+    BILINEAR = Image.Resampling.BILINEAR
+    LINEAR = Image.Resampling.BILINEAR
+    NEAREST = Image.Resampling.NEAREST
+    AFFINE = Image.Transform.AFFINE
+    FLIP_LEFT_RIGHT = Image.Transpose.FLIP_LEFT_RIGHT
+    FLIP_TOP_BOTTOM = Image.Transpose.FLIP_TOP_BOTTOM
+    PERSPECTIVE = Image.Transform.PERSPECTIVE
+else:
+    BICUBIC = Image.BICUBIC
+    BILINEAR = Image.BILINEAR
+    NEAREST = Image.NEAREST
+    LINEAR = Image.LINEAR
+    AFFINE = Image.AFFINE
+    FLIP_LEFT_RIGHT = Image.FLIP_LEFT_RIGHT
+    FLIP_TOP_BOTTOM = Image.FLIP_TOP_BOTTOM
+    PERSPECTIVE = Image.PERSPECTIVE
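
How the shim is consumed (mirroring the call-site changes in this commit): import the module and read the constants through it, so the same spelling works on Pillow both before and after 9.1. A minimal usage sketch:

# Usage sketch for the shim above; works on Pillow before and after 9.1.
import torchvision.transforms._pil_constants as _pil_constants
from PIL import Image

img = Image.new("RGB", (64, 64))
resized = img.resize((32, 32), _pil_constants.BILINEAR)
flipped = img.transpose(_pil_constants.FLIP_LEFT_RIGHT)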
@@ -392,7 +392,7 @@ def resize(
             :class:`torchvision.transforms.InterpolationMode`.
             Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
             ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
-            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
+            For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
         max_size (int, optional): The maximum allowed for the longer edge of
             the resized image: if the longer edge of the image is greater
             than ``max_size`` after being resized according to ``size``, then
@@ -572,7 +572,7 @@ def resized_crop(
             :class:`torchvision.transforms.InterpolationMode`.
             Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
             ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
-            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
+            For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.

     Returns:
         PIL Image or Tensor: Cropped image.
@@ -652,7 +652,7 @@ def perspective(
         interpolation (InterpolationMode): Desired interpolation enum defined by
             :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
             If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
-            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
+            For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
         fill (sequence or number, optional): Pixel fill value for the area outside the transformed
             image. If given a number, the value is used for all bands respectively.
@@ -1012,7 +1012,7 @@ def rotate(
         interpolation (InterpolationMode): Desired interpolation enum defined by
             :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
             If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
-            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
+            For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
         expand (bool, optional): Optional expansion flag.
             If true, expands the output image to make it large enough to hold the entire rotated image.
             If false or omitted, make the output image the same size as the input image.
@@ -1105,7 +1105,7 @@ def affine(
         interpolation (InterpolationMode): Desired interpolation enum defined by
             :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
             If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
-            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
+            For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
         fill (sequence or number, optional): Pixel fill value for the area outside the transformed
             image. If given a number, the value is used for all bands respectively.
...
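
To make the backward-compatibility note in these docstrings concrete, a sketch (assuming the torchvision version at this commit): both the InterpolationMode enum and the legacy PIL integer code are accepted, with the integer path deprecated.

# Sketch: both interpolation spellings accepted by the functional API.
import torch
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F

t = torch.rand(3, 64, 64)
a = F.resize(t, [32, 32], interpolation=InterpolationMode.BILINEAR)  # preferred
b = F.resize(t, [32, 32], interpolation=2)  # legacy PIL BILINEAR code; deprecated
assert a.shape == b.shape == (3, 32, 32)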
@@ -10,6 +10,7 @@ try:
     import accimage
 except ImportError:
     accimage = None

+from . import _pil_constants

 @torch.jit.unused
@@ -54,7 +55,7 @@ def hflip(img: Image.Image) -> Image.Image:
     if not _is_pil_image(img):
         raise TypeError(f"img should be PIL Image. Got {type(img)}")

-    return img.transpose(Image.FLIP_LEFT_RIGHT)
+    return img.transpose(_pil_constants.FLIP_LEFT_RIGHT)

 @torch.jit.unused
@@ -62,7 +63,7 @@ def vflip(img: Image.Image) -> Image.Image:
     if not _is_pil_image(img):
         raise TypeError(f"img should be PIL Image. Got {type(img)}")

-    return img.transpose(Image.FLIP_TOP_BOTTOM)
+    return img.transpose(_pil_constants.FLIP_TOP_BOTTOM)

 @torch.jit.unused
@@ -240,7 +241,7 @@ def crop(
 def resize(
     img: Image.Image,
     size: Union[Sequence[int], int],
-    interpolation: int = Image.BILINEAR,
+    interpolation: int = _pil_constants.BILINEAR,
     max_size: Optional[int] = None,
 ) -> Image.Image:
@@ -314,7 +315,7 @@ def _parse_fill(
 def affine(
     img: Image.Image,
     matrix: List[float],
-    interpolation: int = Image.NEAREST,
+    interpolation: int = _pil_constants.NEAREST,
     fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
 ) -> Image.Image:
@@ -323,14 +324,14 @@ def affine(
     output_size = img.size
     opts = _parse_fill(fill, img)
-    return img.transform(output_size, Image.AFFINE, matrix, interpolation, **opts)
+    return img.transform(output_size, _pil_constants.AFFINE, matrix, interpolation, **opts)

 @torch.jit.unused
 def rotate(
     img: Image.Image,
     angle: float,
-    interpolation: int = Image.NEAREST,
+    interpolation: int = _pil_constants.NEAREST,
     expand: bool = False,
     center: Optional[Tuple[int, int]] = None,
     fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
@@ -347,7 +348,7 @@ def rotate(
 def perspective(
     img: Image.Image,
     perspective_coeffs: float,
-    interpolation: int = Image.BICUBIC,
+    interpolation: int = _pil_constants.BICUBIC,
     fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
 ) -> Image.Image:
@@ -356,7 +357,7 @@ def perspective(
     opts = _parse_fill(fill, img)
-    return img.transform(img.size, Image.PERSPECTIVE, perspective_coeffs, interpolation, **opts)
+    return img.transform(img.size, _pil_constants.PERSPECTIVE, perspective_coeffs, interpolation, **opts)

 @torch.jit.unused
...
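
A note on the int-typed defaults above (e.g. ``interpolation: int = _pil_constants.BILINEAR``): on Pillow >= 9.1 the shim values are enum members, but Resampling, Transform, and Transpose are IntEnums whose values match the legacy integer codes, so the annotations and integer comparisons keep working. A quick sketch, assuming Pillow >= 9.1:

# Sketch, assuming Pillow >= 9.1: the new enums are IntEnums whose
# values equal the legacy module-level integer codes.
from PIL import Image

assert Image.Resampling.NEAREST == 0   # legacy Image.NEAREST
assert Image.Resampling.BILINEAR == 2  # legacy Image.BILINEAR
assert Image.Transpose.FLIP_LEFT_RIGHT == 0
assert Image.Transform.AFFINE == 0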
@@ -17,7 +17,6 @@ from ..utils import _log_api_usage_once
 from . import functional as F
 from .functional import InterpolationMode, _interpolation_modes_from_int
-

 __all__ = [
     "Compose",
     "ToTensor",
@@ -298,7 +297,7 @@ class Resize(torch.nn.Module):
             :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
             If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
             ``InterpolationMode.BICUBIC`` are supported.
-            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
+            For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
         max_size (int, optional): The maximum allowed for the longer edge of
             the resized image: if the longer edge of the image is greater
             than ``max_size`` after being resized according to ``size``, then
@@ -755,7 +754,7 @@ class RandomPerspective(torch.nn.Module):
         interpolation (InterpolationMode): Desired interpolation enum defined by
             :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
             If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
-            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
+            For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
         fill (sequence or number): Pixel fill value for the area outside the transformed
             image. Default is ``0``. If given a number, the value is used for all bands respectively.
     """
@@ -869,7 +868,7 @@ class RandomResizedCrop(torch.nn.Module):
             :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
             If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
             ``InterpolationMode.BICUBIC`` are supported.
-            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
+            For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
     """
@@ -1268,7 +1267,7 @@ class RandomRotation(torch.nn.Module):
         interpolation (InterpolationMode): Desired interpolation enum defined by
             :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
             If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
-            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
+            For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
         expand (bool, optional): Optional expansion flag.
             If true, expands the output to make it large enough to hold the entire rotated image.
             If false or omitted, make the output image the same size as the input image.
@@ -1389,7 +1388,7 @@ class RandomAffine(torch.nn.Module):
         interpolation (InterpolationMode): Desired interpolation enum defined by
             :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
             If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
-            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
+            For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
         fill (sequence or number): Pixel fill value for the area outside the transformed
             image. Default is ``0``. If given a number, the value is used for all bands respectively.
         fillcolor (sequence or number, optional):
...