Unverified commit bac678c8, authored by Philip Meier and committed by GitHub

remove functionality scheduled for 0.15 after deprecation (#7176)

parent a05d8179
@@ -2,7 +2,6 @@ import colorsys
 import itertools
 import math
 import os
-import re
 from functools import partial
 from typing import Sequence
@@ -144,20 +143,6 @@ class TestRotate:
         center = (20, 22)
         _test_fn_on_batch(batch_tensors, F.rotate, angle=32, interpolation=NEAREST, expand=True, center=center)

-    def test_rotate_interpolation_type(self):
-        tensor, _ = _create_data(26, 26)
-        # assert changed type warning
-        with pytest.warns(
-            UserWarning,
-            match=re.escape(
-                "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
-                "Please use InterpolationMode enum."
-            ),
-        ):
-            res1 = F.rotate(tensor, 45, interpolation=2)
-            res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
-            assert_equal(res1, res2)
-

 class TestAffine:
@@ -364,22 +349,6 @@ class TestAffine:
         _test_fn_on_batch(batch_tensors, F.affine, angle=-43, translate=[-3, 4], scale=1.2, shear=[4.0, 5.0])

-    @pytest.mark.parametrize("device", cpu_and_gpu())
-    def test_warnings(self, device):
-        tensor, pil_img = _create_data(26, 26, device=device)
-        # assert changed type warning
-        with pytest.warns(
-            UserWarning,
-            match=re.escape(
-                "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
-                "Please use InterpolationMode enum."
-            ),
-        ):
-            res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=2)
-            res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR)
-            assert_equal(res1, res2)
-

 def _get_data_dims_and_points_for_perspective():
     # Ideally we would parametrize independently over data dims and points, but
@@ -478,23 +447,6 @@ def test_perspective_batch(device, dims_and_points, dt):
     )

-def test_perspective_interpolation_warning():
-    # assert changed type warning
-    spoints = [[0, 0], [33, 0], [33, 25], [0, 25]]
-    epoints = [[3, 2], [32, 3], [30, 24], [2, 25]]
-    tensor = torch.randint(0, 256, (3, 26, 26))
-    with pytest.warns(
-        UserWarning,
-        match=re.escape(
-            "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
-            "Please use InterpolationMode enum."
-        ),
-    ):
-        res1 = F.perspective(tensor, startpoints=spoints, endpoints=epoints, interpolation=2)
-        res2 = F.perspective(tensor, startpoints=spoints, endpoints=epoints, interpolation=BILINEAR)
-        assert_equal(res1, res2)
-

 @pytest.mark.parametrize("device", cpu_and_gpu())
 @pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
 @pytest.mark.parametrize(
@@ -568,19 +520,6 @@ def test_resize_asserts(device):
     tensor, pil_img = _create_data(26, 36, device=device)

-    # assert changed type warning
-    with pytest.warns(
-        UserWarning,
-        match=re.escape(
-            "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
-            "Please use InterpolationMode enum."
-        ),
-    ):
-        res1 = F.resize(tensor, size=32, interpolation=2)
-        res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
-        assert_equal(res1, res2)
-
     for img in (tensor, pil_img):
         exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
         with pytest.raises(ValueError, match=exp_msg):
...
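Migration note for call sites hit by these removals: the deprecated integers were the PIL interpolation codes (0 NEAREST, 2 BILINEAR, 3 BICUBIC), and the removed tests above asserted that the int and enum spellings produce identical results. A minimal sketch of the updated functional calls (the input tensor is illustrative):

import torch
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F

img = torch.randint(0, 256, (3, 26, 26), dtype=torch.uint8)  # illustrative input

# before (warned since 0.13, removed in 0.15): F.rotate(img, 45, interpolation=2)
rotated = F.rotate(img, 45, interpolation=InterpolationMode.BILINEAR)
resized = F.resize(img, size=32, interpolation=InterpolationMode.BILINEAR)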
@@ -87,12 +87,6 @@ CONSISTENCY_CONFIGS = [
         ArgsKwargs((32, 29)),
         ArgsKwargs((31, 28), interpolation=prototype_transforms.InterpolationMode.NEAREST),
         ArgsKwargs((33, 26), interpolation=prototype_transforms.InterpolationMode.BICUBIC),
-        # FIXME: these are currently failing, since the new transform only supports the enum. The int input is
-        # already deprecated and scheduled to be removed in 0.15. Should we support ints on the prototype
-        # transform? I guess it depends if we roll out before 0.15 or not.
-        # ArgsKwargs((30, 27), interpolation=0),
-        # ArgsKwargs((35, 29), interpolation=2),
-        # ArgsKwargs((34, 25), interpolation=3),
         NotScriptableArgsKwargs(31, max_size=32),
         ArgsKwargs([31], max_size=32),
         NotScriptableArgsKwargs(30, max_size=100),
...
@@ -1872,17 +1872,6 @@ def test_random_rotation():
     # Checking if RandomRotation can be printed as string
     t.__repr__()

-    # assert changed type warning
-    with pytest.warns(
-        UserWarning,
-        match=re.escape(
-            "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
-            "Please use InterpolationMode enum."
-        ),
-    ):
-        t = transforms.RandomRotation((-10, 10), interpolation=2)
-        assert t.interpolation == transforms.InterpolationMode.BILINEAR
-

 def test_random_rotation_error():
     # assert fill being either a Sequence or a Number
@@ -2212,17 +2201,6 @@ def test_random_affine():
     t = transforms.RandomAffine(10, interpolation=transforms.InterpolationMode.BILINEAR)
     assert "bilinear" in t.__repr__()

-    # assert changed type warning
-    with pytest.warns(
-        UserWarning,
-        match=re.escape(
-            "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
-            "Please use InterpolationMode enum."
-        ),
-    ):
-        t = transforms.RandomAffine(10, interpolation=2)
-        assert t.interpolation == transforms.InterpolationMode.BILINEAR
-

 def test_elastic_transformation():
     with pytest.raises(TypeError, match=r"alpha should be float or a sequence of floats"):
...
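The class-based transforms take the same enum, and the removed tests confirm the old int spelling mapped onto it (2 → BILINEAR). A sketch of the updated constructors:

from torchvision import transforms
from torchvision.transforms import InterpolationMode

# before (removed in 0.15): transforms.RandomRotation((-10, 10), interpolation=2)
t1 = transforms.RandomRotation((-10, 10), interpolation=InterpolationMode.BILINEAR)
t2 = transforms.RandomAffine(10, interpolation=InterpolationMode.BILINEAR)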
@@ -48,19 +48,6 @@ def _urlretrieve(url: str, filename: str, chunk_size: int = 1024 * 32) -> None:
         _save_response_content(iter(lambda: response.read(chunk_size), b""), filename, length=response.length)

-def gen_bar_updater() -> Callable[[int, int, int], None]:
-    warnings.warn("The function `gen_bar_update` is deprecated since 0.13 and will be removed in 0.15.")
-    pbar = tqdm(total=None)
-
-    def bar_update(count, block_size, total_size):
-        if pbar.total is None and total_size:
-            pbar.total = total_size
-        progress_bytes = count * block_size
-        pbar.update(progress_bytes - pbar.n)
-
-    return bar_update
-

 def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024) -> str:
     # Setting the `usedforsecurity` flag does not change anything about the functionality, but indicates that we are
     # not using the MD5 checksum for cryptography. This enables its usage in restricted environments like FIPS. Without
...
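gen_bar_updater produced a tqdm-backed reporthook for urllib.request.urlretrieve. Code that still needs one can define an equivalent inline; this is a sketch under that assumption (make_reporthook and the URL are hypothetical, not torchvision API):

from urllib.request import urlretrieve
from tqdm import tqdm

def make_reporthook():  # hypothetical stand-in for the removed gen_bar_updater()
    pbar = tqdm(total=None, unit="B", unit_scale=True)

    def hook(count, block_size, total_size):
        if pbar.total is None and total_size:
            pbar.total = total_size  # learn the file size from the first callback
        pbar.update(count * block_size - pbar.n)  # advance to the absolute byte count

    return hook

urlretrieve("https://example.com/file.bin", "file.bin", reporthook=make_reporthook())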
@@ -117,14 +117,3 @@ def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True,
         model.load_state_dict(weights.get_state_dict(progress=progress))

     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from ._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        "alexnet": AlexNet_Weights.IMAGENET1K_V1.url,
-    }
-)
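Each of these shims only re-exposed URLs that already live on the weights enums, so the replacement is a direct attribute access. A minimal sketch for this file; the same pattern applies to every model_urls, quant_model_urls, and backbone_urls removal below:

from torchvision.models import alexnet, AlexNet_Weights

url = AlexNet_Weights.IMAGENET1K_V1.url  # replaces model_urls["alexnet"]
model = alexnet(weights=AlexNet_Weights.IMAGENET1K_V1)  # builders take the enum directly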
@@ -446,16 +446,3 @@ def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool
     weights = DenseNet201_Weights.verify(weights)

     return _densenet(32, (6, 12, 48, 32), 64, weights, progress, **kwargs)
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from ._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        "densenet121": DenseNet121_Weights.IMAGENET1K_V1.url,
-        "densenet169": DenseNet169_Weights.IMAGENET1K_V1.url,
-        "densenet201": DenseNet201_Weights.IMAGENET1K_V1.url,
-        "densenet161": DenseNet161_Weights.IMAGENET1K_V1.url,
-    }
-)
@@ -841,16 +841,3 @@ def fasterrcnn_mobilenet_v3_large_fpn(
         trainable_backbone_layers=trainable_backbone_layers,
         **kwargs,
     )
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from .._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        "fasterrcnn_resnet50_fpn_coco": FasterRCNN_ResNet50_FPN_Weights.COCO_V1.url,
-        "fasterrcnn_mobilenet_v3_large_320_fpn_coco": FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.COCO_V1.url,
-        "fasterrcnn_mobilenet_v3_large_fpn_coco": FasterRCNN_MobileNet_V3_Large_FPN_Weights.COCO_V1.url,
-    }
-)
@@ -769,14 +769,3 @@ def fcos_resnet50_fpn(
         model.load_state_dict(weights.get_state_dict(progress=progress))

     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from .._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        "fcos_resnet50_fpn_coco": FCOS_ResNet50_FPN_Weights.COCO_V1.url,
-    }
-)
@@ -470,16 +470,3 @@ def keypointrcnn_resnet50_fpn(
             overwrite_eps(model, 0.0)

     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from .._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        # legacy model for BC reasons, see https://github.com/pytorch/vision/issues/1606
-        "keypointrcnn_resnet50_fpn_coco_legacy": KeypointRCNN_ResNet50_FPN_Weights.COCO_LEGACY.url,
-        "keypointrcnn_resnet50_fpn_coco": KeypointRCNN_ResNet50_FPN_Weights.COCO_V1.url,
-    }
-)
@@ -585,14 +585,3 @@ def maskrcnn_resnet50_fpn_v2(
         model.load_state_dict(weights.get_state_dict(progress=progress))

     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from .._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        "maskrcnn_resnet50_fpn_coco": MaskRCNN_ResNet50_FPN_Weights.COCO_V1.url,
-    }
-)
@@ -897,14 +897,3 @@ def retinanet_resnet50_fpn_v2(
         model.load_state_dict(weights.get_state_dict(progress=progress))

     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from .._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        "retinanet_resnet50_fpn_coco": RetinaNet_ResNet50_FPN_Weights.COCO_V1.url,
-    }
-)
@@ -680,25 +680,3 @@ def ssd300_vgg16(
         model.load_state_dict(weights.get_state_dict(progress=progress))

     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from .._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        "ssd300_vgg16_coco": SSD300_VGG16_Weights.COCO_V1.url,
-    }
-)
-
-backbone_urls = _ModelURLs(
-    {
-        # We port the features of a VGG16 backbone trained by amdegroot because unlike the one on TorchVision, it uses
-        # the same input standardization method as the paper.
-        # Ref: https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth
-        # Only the `features` weights have proper values, those on the `classifier` module are filled with nans.
-        "vgg16_features": VGG16_Weights.IMAGENET1K_FEATURES.url,
-    }
-)
@@ -329,14 +329,3 @@ def ssdlite320_mobilenet_v3_large(
         model.load_state_dict(weights.get_state_dict(progress=progress))

     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from .._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        "ssdlite320_mobilenet_v3_large_coco": SSDLite320_MobileNet_V3_Large_Weights.COCO_V1.url,
-    }
-)
@@ -1,6 +1,5 @@
 import copy
 import math
-import warnings
 from dataclasses import dataclass
 from functools import partial
 from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
@@ -239,7 +238,6 @@ class EfficientNet(nn.Module):
         num_classes: int = 1000,
         norm_layer: Optional[Callable[..., nn.Module]] = None,
         last_channel: Optional[int] = None,
-        **kwargs: Any,
     ) -> None:
         """
         EfficientNet V1 and V2 main class
@@ -263,16 +261,6 @@ class EfficientNet(nn.Module):
         ):
             raise TypeError("The inverted_residual_setting should be List[MBConvConfig]")

-        if "block" in kwargs:
-            warnings.warn(
-                "The parameter 'block' is deprecated since 0.13 and will be removed 0.15. "
-                "Please pass this information on 'MBConvConfig.block' instead."
-            )
-            if kwargs["block"] is not None:
-                for s in inverted_residual_setting:
-                    if isinstance(s, MBConvConfig):
-                        s.block = kwargs["block"]
-
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
@@ -1141,21 +1129,3 @@ def efficientnet_v2_l(
         norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
         **kwargs,
     )
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from ._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        "efficientnet_b0": EfficientNet_B0_Weights.IMAGENET1K_V1.url,
-        "efficientnet_b1": EfficientNet_B1_Weights.IMAGENET1K_V1.url,
-        "efficientnet_b2": EfficientNet_B2_Weights.IMAGENET1K_V1.url,
-        "efficientnet_b3": EfficientNet_B3_Weights.IMAGENET1K_V1.url,
-        "efficientnet_b4": EfficientNet_B4_Weights.IMAGENET1K_V1.url,
-        "efficientnet_b5": EfficientNet_B5_Weights.IMAGENET1K_V1.url,
-        "efficientnet_b6": EfficientNet_B6_Weights.IMAGENET1K_V1.url,
-        "efficientnet_b7": EfficientNet_B7_Weights.IMAGENET1K_V1.url,
-    }
-)
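As the removed warning says, a custom block now travels on the config objects rather than as an EfficientNet kwarg. A sketch, assuming the module-level names MBConv and MBConvConfig from torchvision.models.efficientnet (defined there, though not re-exported at the top level); the positional arguments mirror the first B0 stage row:

from torchvision.models.efficientnet import MBConv, MBConvConfig

# before (removed in 0.15): EfficientNet(setting, ..., block=MBConv)
cfg = MBConvConfig(1, 3, 1, 32, 16, 1, block=MBConv)  # expand_ratio, kernel, stride, in/out channels, num_layers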
@@ -343,15 +343,3 @@ def googlenet(*, weights: Optional[GoogLeNet_Weights] = None, progress: bool = True
         )
     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from ._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        # GoogLeNet ported from TensorFlow
-        "googlenet": GoogLeNet_Weights.IMAGENET1K_V1.url,
-    }
-)
@@ -476,15 +476,3 @@ def inception_v3(*, weights: Optional[Inception_V3_Weights] = None, progress: bool
         model.AuxLogits = None

     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from ._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        # Inception v3 ported from TensorFlow
-        "inception_v3_google": Inception_V3_Weights.IMAGENET1K_V1.url,
-    }
-)
@@ -258,14 +258,3 @@ def mobilenet_v2(
         model.load_state_dict(weights.get_state_dict(progress=progress))

     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from ._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        "mobilenet_v2": MobileNet_V2_Weights.IMAGENET1K_V1.url,
-    }
-)
@@ -421,15 +421,3 @@ def mobilenet_v3_small(
     inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs)
     return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from ._utils import _ModelURLs
-
-model_urls = _ModelURLs(
-    {
-        "mobilenet_v3_large": MobileNet_V3_Large_Weights.IMAGENET1K_V1.url,
-        "mobilenet_v3_small": MobileNet_V3_Small_Weights.IMAGENET1K_V1.url,
-    }
-)
@@ -208,16 +208,3 @@ def googlenet(
         )
     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from .._utils import _ModelURLs
-from ..googlenet import model_urls  # noqa: F401
-
-quant_model_urls = _ModelURLs(
-    {
-        # fp32 GoogLeNet ported from TensorFlow, with weights quantized in PyTorch
-        "googlenet_fbgemm": GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
-    }
-)
@@ -271,16 +271,3 @@ def inception_v3(
         model.AuxLogits = None

     return model
-
-
-# The dictionary below is internal implementation detail and will be removed in v0.15
-from .._utils import _ModelURLs
-from ..inception import model_urls  # noqa: F401
-
-quant_model_urls = _ModelURLs(
-    {
-        # fp32 weights ported from TensorFlow, quantized in PyTorch
-        "inception_v3_google_fbgemm": Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
-    }
-)