Unverified Commit 423ddcd0 authored by kylematoba, committed by GitHub

Update transforms for PIL deprecation (#5898)



* Update transforms for PIL deprecation

* Changes agreed at pytorch/vision#5898

* black, sort constants, version check

* Format tests

* Square brackets

* Update torchvision/transforms/_pil_constants.py
Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
Co-authored-by: Philip Meier <github.pmeier@posteo.de>
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent 4c02f103
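Context for the diff below: per the Pillow 9.1.0 release notes referenced in the new _pil_constants.py, the resampling, transpose, and transform constants are exposed through the Image.Resampling, Image.Transpose, and Image.Transform enums, and the old module-level names were deprecated at that point. A minimal sketch (not part of this commit, values illustrative) of code that stays warning-free on Pillow releases before and after 9.1:

import PIL
from PIL import Image

# Pick the constant location based on the installed Pillow version
# (the same idea as the _pil_constants shim added by this commit).
if tuple(int(part) for part in PIL.__version__.split(".")[:2]) >= (9, 1):
    BILINEAR = Image.Resampling.BILINEAR  # enum home since Pillow 9.1
else:
    BILINEAR = Image.BILINEAR  # module-level constant on older Pillow

img = Image.new("RGB", (64, 64))
resized = img.resize((32, 32), BILINEAR)
print(resized.size)  # (32, 32)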
@@ -412,12 +412,13 @@ class TestONNXExporter:
    def get_image(self, rel_path: str, size: Tuple[int, int]) -> torch.Tensor:
        import os
        import torchvision.transforms._pil_constants as _pil_constants
        from PIL import Image
        from torchvision.transforms import functional as F

        data_dir = os.path.join(os.path.dirname(__file__), "assets")
        path = os.path.join(data_dir, *rel_path.split("/"))
        image = Image.open(path).convert("RGB").resize(size, Image.BILINEAR)
        image = Image.open(path).convert("RGB").resize(size, _pil_constants.BILINEAR)

        return F.convert_image_dtype(F.pil_to_tensor(image))
@@ -8,6 +8,7 @@ import numpy as np
import pytest
import torch
import torchvision.transforms as transforms
import torchvision.transforms._pil_constants as _pil_constants
import torchvision.transforms.functional as F
import torchvision.transforms.functional_tensor as F_t
from PIL import Image
@@ -173,7 +174,7 @@ class TestAccImage:
    def test_accimage_resize(self):
        trans = transforms.Compose(
            [
                transforms.Resize(256, interpolation=Image.LINEAR),
                transforms.Resize(256, interpolation=_pil_constants.LINEAR),
                transforms.PILToTensor(),
                transforms.ConvertImageDtype(dtype=torch.float),
            ]
@@ -4,6 +4,7 @@ import sys
import numpy as np
import pytest
import torch
import torchvision.transforms._pil_constants as _pil_constants
from common_utils import (
    get_tmp_dir,
    int_dtypes,
@@ -15,7 +16,6 @@ from common_utils import (
    cpu_and_gpu,
    assert_equal,
)
from PIL import Image
from torchvision import transforms as T
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F
@@ -771,13 +771,13 @@ def test_autoaugment__op_apply_shear(interpolation, mode):
            matrix = (1, level, 0, 0, 1, 0)
        elif mode == "Y":
            matrix = (1, 0, 0, level, 1, 0)
        return pil_img.transform((image_size, image_size), Image.AFFINE, matrix, resample=resample)
        return pil_img.transform((image_size, image_size), _pil_constants.AFFINE, matrix, resample=resample)

    t_img, pil_img = _create_data(image_size, image_size)

    resample_pil = {
        F.InterpolationMode.NEAREST: Image.NEAREST,
        F.InterpolationMode.BILINEAR: Image.BILINEAR,
        F.InterpolationMode.NEAREST: _pil_constants.NEAREST,
        F.InterpolationMode.BILINEAR: _pil_constants.BILINEAR,
    }[interpolation]

    level = 0.3
torchvision/transforms/_pil_constants.py (new file)
import PIL
from PIL import Image

# See https://pillow.readthedocs.io/en/stable/releasenotes/9.1.0.html#deprecations
# TODO: Remove this file once PIL minimal version is >= 9.1
if tuple(int(part) for part in PIL.__version__.split(".")) >= (9, 1):
    BICUBIC = Image.Resampling.BICUBIC
    BILINEAR = Image.Resampling.BILINEAR
    LINEAR = Image.Resampling.BILINEAR
    NEAREST = Image.Resampling.NEAREST

    AFFINE = Image.Transform.AFFINE
    FLIP_LEFT_RIGHT = Image.Transpose.FLIP_LEFT_RIGHT
    FLIP_TOP_BOTTOM = Image.Transpose.FLIP_TOP_BOTTOM
    PERSPECTIVE = Image.Transform.PERSPECTIVE
else:
    BICUBIC = Image.BICUBIC
    BILINEAR = Image.BILINEAR
    NEAREST = Image.NEAREST
    LINEAR = Image.LINEAR

    AFFINE = Image.AFFINE
    FLIP_LEFT_RIGHT = Image.FLIP_LEFT_RIGHT
    FLIP_TOP_BOTTOM = Image.FLIP_TOP_BOTTOM
    PERSPECTIVE = Image.PERSPECTIVE
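A hedged usage sketch (not part of the diff) mirroring the call sites changed above: callers import the shim and read the constants from it rather than from PIL.Image directly, so the same call works on Pillow releases before and after 9.1. The image and sizes are placeholders.

import torchvision.transforms._pil_constants as _pil_constants
from PIL import Image

img = Image.new("RGB", (64, 64))
# Resampling filter and transpose method resolved through the shim.
resized = img.resize((32, 32), _pil_constants.BILINEAR)
flipped = resized.transpose(_pil_constants.FLIP_LEFT_RIGHT)
print(resized.size, flipped.size)  # (32, 32) (32, 32)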
@@ -392,7 +392,7 @@ def resize(
:class:`torchvision.transforms.InterpolationMode`.
Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
max_size (int, optional): The maximum allowed for the longer edge of
the resized image: if the longer edge of the image is greater
than ``max_size`` after being resized according to ``size``, then
@@ -572,7 +572,7 @@ def resized_crop(
:class:`torchvision.transforms.InterpolationMode`.
Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
Returns:
PIL Image or Tensor: Cropped image.
@@ -652,7 +652,7 @@ def perspective(
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
@@ -1012,7 +1012,7 @@ def rotate(
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
@@ -1105,7 +1105,7 @@ def affine(
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
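To illustrate the backward-compatibility note repeated in these docstrings, here is a small sketch (not part of the diff): the functional API takes the documented InterpolationMode enum and, for backward compatibility, still accepts the legacy PIL integer code, which torchvision maps through _interpolation_modes_from_int; the tensor shape is arbitrary and whether a warning is emitted for the integer form depends on the torchvision version.

import torch
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F

img = torch.rand(3, 64, 64)

# Documented form: pass the enum.
out_enum = F.resize(img, [32, 32], interpolation=InterpolationMode.BILINEAR)

# Backward-compatible form: the legacy integer code (2 == PIL bilinear)
# is still accepted and mapped to the enum.
out_int = F.resize(img, [32, 32], interpolation=2)

assert out_enum.shape == out_int.shape == (3, 32, 32)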
@@ -10,6 +10,7 @@ try:
    import accimage
except ImportError:
    accimage = None

from . import _pil_constants


@torch.jit.unused
@@ -54,7 +55,7 @@ def hflip(img: Image.Image) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    return img.transpose(Image.FLIP_LEFT_RIGHT)
    return img.transpose(_pil_constants.FLIP_LEFT_RIGHT)
@torch.jit.unused
@@ -62,7 +63,7 @@ def vflip(img: Image.Image) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    return img.transpose(Image.FLIP_TOP_BOTTOM)
    return img.transpose(_pil_constants.FLIP_TOP_BOTTOM)
@torch.jit.unused
@@ -240,7 +241,7 @@ def crop(
def resize(
    img: Image.Image,
    size: Union[Sequence[int], int],
    interpolation: int = Image.BILINEAR,
    interpolation: int = _pil_constants.BILINEAR,
    max_size: Optional[int] = None,
) -> Image.Image:
@@ -314,7 +315,7 @@ def _parse_fill(
def affine(
    img: Image.Image,
    matrix: List[float],
    interpolation: int = Image.NEAREST,
    interpolation: int = _pil_constants.NEAREST,
    fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
) -> Image.Image:
@@ -323,14 +324,14 @@ def affine(
    output_size = img.size
    opts = _parse_fill(fill, img)
    return img.transform(output_size, Image.AFFINE, matrix, interpolation, **opts)
    return img.transform(output_size, _pil_constants.AFFINE, matrix, interpolation, **opts)


@torch.jit.unused
def rotate(
    img: Image.Image,
    angle: float,
    interpolation: int = Image.NEAREST,
    interpolation: int = _pil_constants.NEAREST,
    expand: bool = False,
    center: Optional[Tuple[int, int]] = None,
    fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
@@ -347,7 +348,7 @@ def rotate(
def perspective(
    img: Image.Image,
    perspective_coeffs: float,
    interpolation: int = Image.BICUBIC,
    interpolation: int = _pil_constants.BICUBIC,
    fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
) -> Image.Image:
@@ -356,7 +357,7 @@ def perspective(
    opts = _parse_fill(fill, img)
    return img.transform(img.size, Image.PERSPECTIVE, perspective_coeffs, interpolation, **opts)
    return img.transform(img.size, _pil_constants.PERSPECTIVE, perspective_coeffs, interpolation, **opts)
@torch.jit.unused
@@ -17,7 +17,6 @@ from ..utils import _log_api_usage_once
from . import functional as F
from .functional import InterpolationMode, _interpolation_modes_from_int

__all__ = [
    "Compose",
    "ToTensor",
@@ -298,7 +297,7 @@ class Resize(torch.nn.Module):
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
``InterpolationMode.BICUBIC`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
max_size (int, optional): The maximum allowed for the longer edge of
the resized image: if the longer edge of the image is greater
than ``max_size`` after being resized according to ``size``, then
@@ -755,7 +754,7 @@ class RandomPerspective(torch.nn.Module):
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
fill (sequence or number): Pixel fill value for the area outside the transformed
image. Default is ``0``. If given a number, the value is used for all bands respectively.
"""
@@ -869,7 +868,7 @@ class RandomResizedCrop(torch.nn.Module):
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
``InterpolationMode.BICUBIC`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
"""
@@ -1268,7 +1267,7 @@ class RandomRotation(torch.nn.Module):
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
@@ -1389,7 +1388,7 @@ class RandomAffine(torch.nn.Module):
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
For backward compatibility integer values (e.g. ``PIL.Image[.Resampling].NEAREST``) are still acceptable.
fill (sequence or number): Pixel fill value for the area outside the transformed
image. Default is ``0``. If given a number, the value is used for all bands respectively.
fillcolor (sequence or number, optional):
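Finally, a hedged end-to-end sketch (not part of the diff) of the class-based transforms whose docstrings are updated above, using only the documented InterpolationMode enum so that no PIL constant, old or new, is touched; sizes and parameters are illustrative.

import torch
from torchvision import transforms as T
from torchvision.transforms import InterpolationMode

pipeline = T.Compose(
    [
        T.RandomResizedCrop(224, interpolation=InterpolationMode.BILINEAR),
        T.RandomRotation(15, interpolation=InterpolationMode.NEAREST),
        T.ConvertImageDtype(torch.float),
    ]
)

# Run the pipeline on a random uint8 image tensor.
out = pipeline(torch.randint(0, 256, (3, 256, 256), dtype=torch.uint8))
print(out.shape)  # torch.Size([3, 224, 224])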