Unverified Commit e722e9c7 authored by Vasilis Vryniotis, committed by GitHub

Reinstate and deprecate `model_urls` and `quant_model_urls` (#5992)



* Reinstate and deprecate `model_urls` and `quant_model_urls`

* Apply suggestions from code review
Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>

* Move todo location

* Add alexnet

* Add densenet

* Add efficientnet

* Add googlenet.

* Add inception.

* Add mobilenetv3

* Add regnet

* Add resnet

* Add shufflenetv2

* Fix linter

* Add squeezenet

* Add vgg

* Add vit

* Add quantized googlenet

* Add quantized inceptionv3

* Add quantized mobilenet_v3

* Add quantized resnet

* Add quantized shufflenetv2

* Fix incorrect imports

* Add faster_rcnn

* Add fcos

* Add keypoint rcnn

* Add mask rcnn

* Add retinanet

* Add ssd

* Add ssdlite

* Add deeplabv3

* Add fcn

* Add lraspp.

* Add video resnet

* Removing weights for shufflenetv2_x1.5 and shufflenetv2_x2.0

* Update the comments
Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
parent 9166b671
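For orientation, a minimal sketch of the legacy call style this change keeps working next to the weights-enum style it points users to (assumes torchvision 0.13; `resnet50` is just one example builder):

```python
from torchvision.models import resnet50, ResNet50_Weights

# Legacy style: still works until 0.15, but now emits the deprecation warnings added below.
legacy_model = resnet50(pretrained=True)

# Recommended style since 0.13: pass an explicit weights enum (or None for random init).
model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)

# The enum also exposes the checkpoint URL that the old `model_urls` dictionaries returned.
print(ResNet50_Weights.IMAGENET1K_V1.url)
```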
@@ -134,7 +134,8 @@ def kwonly_to_pos_or_kw(fn: Callable[..., D]) -> Callable[..., D]:
            keyword_only_kwargs = dict(zip(keyword_only_params, keyword_only_args))
            warnings.warn(
                f"Using {sequence_to_str(tuple(keyword_only_kwargs.keys()), separate_last='and ')} as positional "
                f"parameter(s) is deprecated since 0.13 and will be removed in 0.15. Please use keyword parameter(s) "
                f"instead."
            )
            kwargs.update(keyword_only_kwargs)
@@ -205,11 +206,13 @@ def handle_legacy_interface(**weights: Tuple[str, Union[Optional[W], Callable[[D
            if not pretrained_positional:
                warnings.warn(
                    f"The parameter '{pretrained_param}' is deprecated since 0.13 and will be removed in 0.15, "
                    f"please use '{weights_param}' instead."
                )

            msg = (
                f"Arguments other than a weight enum or `None` for '{weights_param}' are deprecated since 0.13 and "
                f"will be removed in 0.15. "
                f"The current behavior is equivalent to passing `{weights_param}={default_weights_arg}`."
            )
            if pretrained_arg:
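A small sketch of what a builder decorated with `handle_legacy_interface` now emits when called the old way; the printed text is paraphrased from the messages above, and `resnet50` is just an example entry point:

```python
import warnings

from torchvision.models import resnet50

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    resnet50(pretrained=True)  # legacy keyword, still accepted until 0.15

for w in caught:
    print(w.message)
# Roughly:
#   The parameter 'pretrained' is deprecated since 0.13 and will be removed in 0.15, please use 'weights' instead.
#   Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and will be removed in
#   0.15. The current behavior is equivalent to passing `weights=ResNet50_Weights.IMAGENET1K_V1`.
```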
@@ -242,3 +245,12 @@ def _ovewrite_value_param(param: Optional[V], new_value: V) -> V:
    if param != new_value:
        raise ValueError(f"The parameter '{param}' expected value {new_value} but got {param} instead.")
    return new_value


class _ModelURLs(dict):
    def __getitem__(self, item):
        warnings.warn(
            "Accessing the model URLs via the internal dictionary of the module is deprecated since 0.13 and will "
            "be removed in 0.15. Please access them via the appropriate Weights Enum instead."
        )
        return super().__getitem__(item)
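The subclass only intercepts reads, so legacy lookups still return the URL after warning. A short sketch of that behavior, using a placeholder URL rather than a real checkpoint:

```python
import warnings

from torchvision.models._utils import _ModelURLs

urls = _ModelURLs({"alexnet": "https://example.com/alexnet.pth"})  # placeholder URL for illustration

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    url = urls["alexnet"]  # __getitem__ warns, then falls through to the plain dict lookup

print(url)                # https://example.com/alexnet.pth
print(caught[0].message)  # "Accessing the model URLs via the internal dictionary ... is deprecated ..."
```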
@@ -111,3 +111,14 @@ def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True,
    model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "alexnet": AlexNet_Weights.IMAGENET1K_V1.url,
    }
)
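A sketch of the kind of pre-0.13 downstream code the reinstated dictionary keeps alive, next to its 0.13 replacement (assumes the hunk above lives in `torchvision.models.alexnet`):

```python
import torch

from torchvision.models import alexnet, AlexNet_Weights
from torchvision.models.alexnet import model_urls

# Legacy pattern: build the architecture, then fetch the checkpoint via the URL dictionary.
# Still functional, but the lookup now emits the _ModelURLs deprecation warning.
legacy = alexnet()
legacy.load_state_dict(torch.hub.load_state_dict_from_url(model_urls["alexnet"]))

# Equivalent pattern since 0.13: let the weights enum handle the download.
model = alexnet(weights=AlexNet_Weights.IMAGENET1K_V1)
```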
@@ -430,3 +430,17 @@ def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool
    weights = DenseNet201_Weights.verify(weights)

    return _densenet(32, (6, 12, 48, 32), 64, weights, progress, **kwargs)


# The dictionary below is an internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "densenet121": DenseNet121_Weights.IMAGENET1K_V1.url,
        "densenet169": DenseNet169_Weights.IMAGENET1K_V1.url,
        "densenet201": DenseNet201_Weights.IMAGENET1K_V1.url,
        "densenet161": DenseNet161_Weights.IMAGENET1K_V1.url,
    }
)
@@ -804,3 +804,16 @@ def fasterrcnn_mobilenet_v3_large_fpn(
        trainable_backbone_layers=trainable_backbone_layers,
        **kwargs,
    )


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "fasterrcnn_resnet50_fpn_coco": FasterRCNN_ResNet50_FPN_Weights.COCO_V1.url,
        "fasterrcnn_mobilenet_v3_large_320_fpn_coco": FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.COCO_V1.url,
        "fasterrcnn_mobilenet_v3_large_fpn_coco": FasterRCNN_MobileNet_V3_Large_FPN_Weights.COCO_V1.url,
    }
)
@@ -758,3 +758,14 @@ def fcos_resnet50_fpn(
    model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "fcos_resnet50_fpn_coco": FCOS_ResNet50_FPN_Weights.COCO_V1.url,
    }
)
@@ -454,3 +454,16 @@ def keypointrcnn_resnet50_fpn(
        overwrite_eps(model, 0.0)

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        # legacy model for BC reasons, see https://github.com/pytorch/vision/issues/1606
        "keypointrcnn_resnet50_fpn_coco_legacy": KeypointRCNN_ResNet50_FPN_Weights.COCO_LEGACY.url,
        "keypointrcnn_resnet50_fpn_coco": KeypointRCNN_ResNet50_FPN_Weights.COCO_V1.url,
    }
)
@@ -565,3 +565,14 @@ def maskrcnn_resnet50_fpn_v2(
    model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "maskrcnn_resnet50_fpn_coco": MaskRCNN_ResNet50_FPN_Weights.COCO_V1.url,
    }
)
@@ -879,3 +879,14 @@ def retinanet_resnet50_fpn_v2(
    model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "retinanet_resnet50_fpn_coco": RetinaNet_ResNet50_FPN_Weights.COCO_V1.url,
    }
)
@@ -672,3 +672,25 @@ def ssd300_vgg16(
    model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "ssd300_vgg16_coco": SSD300_VGG16_Weights.COCO_V1.url,
    }
)

backbone_urls = _ModelURLs(
    {
        # We port the features of a VGG16 backbone trained by amdegroot because unlike the one on TorchVision, it uses
        # the same input standardization method as the paper.
        # Ref: https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth
        # Only the `features` weights have proper values; those on the `classifier` module are filled with NaNs.
        "vgg16_features": VGG16_Weights.IMAGENET1K_FEATURES.url,
    }
)
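The SSD module re-exposes the VGG16 backbone checkpoint as well; a brief sketch of the legacy access it preserves (assumes the hunk above lives in `torchvision.models.detection.ssd`):

```python
from torchvision.models import VGG16_Weights
from torchvision.models.detection.ssd import backbone_urls

# Emits the same _ModelURLs deprecation warning; the enum is the forward-looking access path.
assert backbone_urls["vgg16_features"] == VGG16_Weights.IMAGENET1K_FEATURES.url
```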
@@ -321,3 +321,14 @@ def ssdlite320_mobilenet_v3_large(
    model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "ssdlite320_mobilenet_v3_large_coco": SSDLite320_MobileNet_V3_Large_Weights.COCO_V1.url,
    }
)
@@ -1028,3 +1028,23 @@ def efficientnet_v2_l(
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )


# The dictionary below is an internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        # Weights ported from https://github.com/rwightman/pytorch-image-models/
        "efficientnet_b0": EfficientNet_B0_Weights.IMAGENET1K_V1.url,
        "efficientnet_b1": EfficientNet_B1_Weights.IMAGENET1K_V1.url,
        "efficientnet_b2": EfficientNet_B2_Weights.IMAGENET1K_V1.url,
        "efficientnet_b3": EfficientNet_B3_Weights.IMAGENET1K_V1.url,
        "efficientnet_b4": EfficientNet_B4_Weights.IMAGENET1K_V1.url,
        # Weights ported from https://github.com/lukemelas/EfficientNet-PyTorch/
        "efficientnet_b5": EfficientNet_B5_Weights.IMAGENET1K_V1.url,
        "efficientnet_b6": EfficientNet_B6_Weights.IMAGENET1K_V1.url,
        "efficientnet_b7": EfficientNet_B7_Weights.IMAGENET1K_V1.url,
    }
)
@@ -339,3 +339,15 @@ def googlenet(*, weights: Optional[GoogLeNet_Weights] = None, progress: bool = T
    )

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        # GoogLeNet ported from TensorFlow
        "googlenet": GoogLeNet_Weights.IMAGENET1K_V1.url,
    }
)
@@ -471,3 +471,15 @@ def inception_v3(*, weights: Optional[Inception_V3_Weights] = None, progress: bo
        model.AuxLogits = None

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        # Inception v3 ported from TensorFlow
        "inception_v3_google": Inception_V3_Weights.IMAGENET1K_V1.url,
    }
)
@@ -263,3 +263,14 @@ def mobilenet_v2(
    model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "mobilenet_v2": MobileNet_V2_Weights.IMAGENET1K_V1.url,
    }
)
@@ -414,3 +414,15 @@ def mobilenet_v3_small(
    inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs)
    return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)


# The dictionary below is an internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "mobilenet_v3_large": MobileNet_V3_Large_Weights.IMAGENET1K_V1.url,
        "mobilenet_v3_small": MobileNet_V3_Small_Weights.IMAGENET1K_V1.url,
    }
)
@@ -200,3 +200,16 @@ def googlenet(
    )

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
from ..googlenet import model_urls  # noqa: F401


quant_model_urls = _ModelURLs(
    {
        # fp32 GoogLeNet ported from TensorFlow, with weights quantized in PyTorch
        "googlenet_fbgemm": GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
    }
)
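The quantization modules get two compatibility surfaces: their own `quant_model_urls` plus a re-export of the fp32 `model_urls`. A sketch of the legacy lookups this preserves (assumes the hunk above lives in `torchvision.models.quantization.googlenet`):

```python
from torchvision.models import GoogLeNet_Weights
from torchvision.models.quantization import GoogLeNet_QuantizedWeights
from torchvision.models.quantization.googlenet import model_urls, quant_model_urls

# Both lookups warn via _ModelURLs but keep pre-0.13 code running until 0.15.
assert model_urls["googlenet"] == GoogLeNet_Weights.IMAGENET1K_V1.url
assert quant_model_urls["googlenet_fbgemm"] == GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url
```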
@@ -251,3 +251,16 @@ def inception_v3(
        model.AuxLogits = None

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
from ..inception import model_urls  # noqa: F401


quant_model_urls = _ModelURLs(
    {
        # fp32 weights ported from TensorFlow, quantized in PyTorch
        "inception_v3_google_fbgemm": Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
    }
)
@@ -131,3 +131,15 @@ def mobilenet_v2(
    model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
from ..mobilenetv2 import model_urls  # noqa: F401


quant_model_urls = _ModelURLs(
    {
        "mobilenet_v2_qnnpack": MobileNet_V2_QuantizedWeights.IMAGENET1K_QNNPACK_V1.url,
    }
)
@@ -211,3 +211,15 @@ def mobilenet_v3_large(
    inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
    return _mobilenet_v3_model(inverted_residual_setting, last_channel, weights, progress, quantize, **kwargs)


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
from ..mobilenetv3 import model_urls  # noqa: F401


quant_model_urls = _ModelURLs(
    {
        "mobilenet_v3_large_qnnpack": MobileNet_V3_Large_QuantizedWeights.IMAGENET1K_QNNPACK_V1.url,
    }
)
@@ -362,3 +362,17 @@ def resnext101_64x4d(
    _ovewrite_named_param(kwargs, "groups", 64)
    _ovewrite_named_param(kwargs, "width_per_group", 4)
    return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
from ..resnet import model_urls  # noqa: F401


quant_model_urls = _ModelURLs(
    {
        "resnet18_fbgemm": ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
        "resnet50_fbgemm": ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
        "resnext101_32x8d_fbgemm": ResNeXt101_32X8D_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
    }
)