Unverified commit a89b1957 authored by Vasilis Vryniotis, committed by GitHub

Add missing `handle_legacy_interface()` calls (#6565)

* Add `handle_legacy_interface()` to all new models.

* Fix imports

* Addressing review comments.

* Fix linter

* Addressing further comments.
parent cac4e228
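Context for the diff below: the `handle_legacy_interface()` decorator translates the deprecated `pretrained=`/`pretrained_backbone=` boolean flags into the new `weights=`/`weights_backbone=` enum arguments and emits a deprecation warning. A minimal, simplified sketch of that pattern (not the actual torchvision implementation, which lives in `torchvision.models._utils` and covers more edge cases):

```python
import functools
import warnings


def handle_legacy_interface_sketch(**legacy_map):
    """Simplified stand-in: maps e.g. pretrained=True -> weights=<default enum>."""

    def decorator(builder):
        @functools.wraps(builder)
        def wrapper(*args, **kwargs):
            for weights_param, (legacy_param, default) in legacy_map.items():
                if legacy_param in kwargs:
                    pretrained = kwargs.pop(legacy_param)
                    warnings.warn(
                        f"The parameter '{legacy_param}' is deprecated, "
                        f"please use '{weights_param}' instead."
                    )
                    # A callable default lets the resolved weights depend on other
                    # kwargs (see the quantization hunks below).
                    resolved = default(kwargs) if callable(default) else default
                    kwargs[weights_param] = resolved if pretrained else None
            return builder(*args, **kwargs)

        return wrapper

    return decorator
```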
......@@ -335,3 +335,17 @@ class TestHandleLegacyInterface:
        with pytest.raises(ValueError, match="weights"):
            builder(pretrained=True, flag=False)

    @pytest.mark.parametrize(
        "model_fn",
        [fn for fn in TM.list_model_fns(models) if fn.__name__ not in {"vit_h_14", "regnet_y_128gf"}]
        + TM.list_model_fns(models.detection)
        + TM.list_model_fns(models.quantization)
        + TM.list_model_fns(models.segmentation)
        + TM.list_model_fns(models.video)
        + TM.list_model_fns(models.optical_flow),
    )
    @run_if_test_with_extended
    def test_pretrained_deprecation(self, model_fn):
        with pytest.warns(UserWarning, match="deprecated"):
            model_fn(pretrained=True)
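What the new parametrized test exercises, illustrated with `swin_t` (decorated below with `Swin_T_Weights.IMAGENET1K_V1`); note that both calls download the checkpoint:

```python
import pytest
from torchvision.models import swin_t, Swin_T_Weights

# Legacy interface: warns, then behaves like the explicit weights call below.
with pytest.warns(UserWarning, match="deprecated"):
    legacy_model = swin_t(pretrained=True)

# New interface: no warning, weights passed explicitly.
new_model = swin_t(weights=Swin_T_Weights.IMAGENET1K_V1)
```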
......@@ -571,6 +571,10 @@ def fasterrcnn_resnet50_fpn(
@register_model()
@handle_legacy_interface(
    weights=("pretrained", FasterRCNN_ResNet50_FPN_V2_Weights.COCO_V1),
    weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def fasterrcnn_resnet50_fpn_v2(
    *,
    weights: Optional[FasterRCNN_ResNet50_FPN_V2_Weights] = None,
......
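The detection builders map two legacy flags at once (`pretrained` and `pretrained_backbone`). A hedged usage sketch (downloads the COCO checkpoint):

```python
import pytest
from torchvision.models.detection import fasterrcnn_resnet50_fpn_v2

# Both legacy flags map onto the new weights/weights_backbone enums and warn.
with pytest.warns(UserWarning, match="deprecated"):
    model = fasterrcnn_resnet50_fpn_v2(pretrained=True, pretrained_backbone=True)
```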
......@@ -505,6 +505,10 @@ def maskrcnn_resnet50_fpn(
@register_model()
@handle_legacy_interface(
    weights=("pretrained", MaskRCNN_ResNet50_FPN_V2_Weights.COCO_V1),
    weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def maskrcnn_resnet50_fpn_v2(
    *,
    weights: Optional[MaskRCNN_ResNet50_FPN_V2_Weights] = None,
......
......@@ -819,6 +819,10 @@ def retinanet_resnet50_fpn(
@register_model()
@handle_legacy_interface(
    weights=("pretrained", RetinaNet_ResNet50_FPN_V2_Weights.COCO_V1),
    weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def retinanet_resnet50_fpn_v2(
    *,
    weights: Optional[RetinaNet_ResNet50_FPN_V2_Weights] = None,
......
......@@ -421,6 +421,14 @@ def resnext101_32x8d(
@register_model(name="quantized_resnext101_64x4d")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ResNeXt101_64X4D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
)
)
def resnext101_64x4d(
*,
weights: Optional[Union[ResNeXt101_64X4D_QuantizedWeights, ResNeXt101_64X4D_Weights]] = None,
......
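The callable default above (also used for the shufflenet builders below) makes the resolved legacy weights depend on the `quantize` kwarg. A usage sketch, assuming a torchvision build that ships these quantized builders:

```python
from torchvision.models.quantization import resnext101_64x4d

# Legacy flag + quantize=True -> quantized FBGEMM checkpoint.
quantized = resnext101_64x4d(pretrained=True, quantize=True)

# Legacy flag without quantize -> float ImageNet checkpoint.
# (Both calls also emit the deprecation warning exercised by the test above.)
float_model = resnext101_64x4d(pretrained=True)
```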
......@@ -312,6 +312,14 @@ def shufflenet_v2_x1_0(
@register_model(name="quantized_shufflenet_v2_x1_5")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ShuffleNet_V2_X1_5_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1,
)
)
def shufflenet_v2_x1_5(
*,
weights: Optional[Union[ShuffleNet_V2_X1_5_QuantizedWeights, ShuffleNet_V2_X1_5_Weights]] = None,
......@@ -358,6 +366,14 @@ def shufflenet_v2_x1_5(
@register_model(name="quantized_shufflenet_v2_x2_0")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ShuffleNet_V2_X2_0_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1,
)
)
def shufflenet_v2_x2_0(
*,
weights: Optional[Union[ShuffleNet_V2_X2_0_QuantizedWeights, ShuffleNet_V2_X2_0_Weights]] = None,
......
......@@ -854,6 +854,7 @@ def resnext101_32x8d(
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNeXt101_64X4D_Weights.IMAGENET1K_V1))
def resnext101_64x4d(
    *, weights: Optional[ResNeXt101_64X4D_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
......
......@@ -12,7 +12,7 @@ from ..transforms._presets import ImageClassification, InterpolationMode
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
-from ._utils import _ovewrite_named_param
+from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
......@@ -782,6 +782,7 @@ class Swin_V2_B_Weights(WeightsEnum):
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_T_Weights.IMAGENET1K_V1))
def swin_t(*, weights: Optional[Swin_T_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_tiny architecture from
......@@ -819,6 +820,7 @@ def swin_t(*, weights: Optional[Swin_T_Weights] = None, progress: bool = True, *
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_S_Weights.IMAGENET1K_V1))
def swin_s(*, weights: Optional[Swin_S_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_small architecture from
......@@ -856,6 +858,7 @@ def swin_s(*, weights: Optional[Swin_S_Weights] = None, progress: bool = True, *
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_B_Weights.IMAGENET1K_V1))
def swin_b(*, weights: Optional[Swin_B_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_base architecture from
......@@ -893,6 +896,7 @@ def swin_b(*, weights: Optional[Swin_B_Weights] = None, progress: bool = True, *
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_V2_T_Weights.IMAGENET1K_V1))
def swin_v2_t(*, weights: Optional[Swin_V2_T_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_v2_tiny architecture from
......@@ -932,6 +936,7 @@ def swin_v2_t(*, weights: Optional[Swin_V2_T_Weights] = None, progress: bool = T
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_V2_S_Weights.IMAGENET1K_V1))
def swin_v2_s(*, weights: Optional[Swin_V2_S_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_v2_small architecture from
......@@ -971,6 +976,7 @@ def swin_v2_s(*, weights: Optional[Swin_V2_S_Weights] = None, progress: bool = T
@register_model()
@handle_legacy_interface(weights=("pretrained", Swin_V2_B_Weights.IMAGENET1K_V1))
def swin_v2_b(*, weights: Optional[Swin_V2_B_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
"""
Constructs a swin_v2_base architecture from
......
......@@ -12,7 +12,7 @@ from ...transforms._presets import VideoClassification
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._meta import _KINETICS400_CATEGORIES
-from .._utils import _ovewrite_named_param
+from .._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
......@@ -661,6 +661,7 @@ class MViT_V2_S_Weights(WeightsEnum):
@register_model()
@handle_legacy_interface(weights=("pretrained", MViT_V1_B_Weights.KINETICS400_V1))
def mvit_v1_b(*, weights: Optional[MViT_V1_B_Weights] = None, progress: bool = True, **kwargs: Any) -> MViT:
"""
Constructs a base MViTV1 architecture from
......@@ -756,6 +757,7 @@ def mvit_v1_b(*, weights: Optional[MViT_V1_B_Weights] = None, progress: bool = T
@register_model()
@handle_legacy_interface(weights=("pretrained", MViT_V2_S_Weights.KINETICS400_V1))
def mvit_v2_s(*, weights: Optional[MViT_V2_S_Weights] = None, progress: bool = True, **kwargs: Any) -> MViT:
"""
Constructs a small MViTV2 architecture from
......
......@@ -9,7 +9,7 @@ from ...transforms._presets import VideoClassification
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._meta import _KINETICS400_CATEGORIES
-from .._utils import _ovewrite_named_param
+from .._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
......@@ -181,6 +181,7 @@ class S3D_Weights(WeightsEnum):
@register_model()
@handle_legacy_interface(weights=("pretrained", S3D_Weights.KINETICS400_V1))
def s3d(*, weights: Optional[S3D_Weights] = None, progress: bool = True, **kwargs: Any) -> S3D:
"""Construct Separable 3D CNN model.
......
......@@ -733,6 +733,7 @@ def vit_l_32(*, weights: Optional[ViT_L_32_Weights] = None, progress: bool = Tru
@register_model()
@handle_legacy_interface(weights=("pretrained", None))
def vit_h_14(*, weights: Optional[ViT_H_14_Weights] = None, progress: bool = True, **kwargs: Any) -> VisionTransformer:
"""
Constructs a vit_h_14 architecture from
......
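Builders registered with a `None` legacy default, like `vit_h_14` here, have no checkpoint for `pretrained=True` to resolve to, which is presumably why `vit_h_14` and `regnet_y_128gf` are excluded from `test_pretrained_deprecation` above. Assuming the decorator rejects the legacy flag in that case (an assumption, not verified in this diff), the expected behaviour would be roughly:

```python
import pytest
from torchvision.models import vit_h_14

# Assumption: with no legacy default weights registered, the old flag cannot be
# honored, so the decorator is expected to raise rather than warn.
with pytest.raises(ValueError):
    vit_h_14(pretrained=True)
```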
......@@ -6,6 +6,7 @@ import torch.nn.functional as F
import torchvision.models.optical_flow.raft as raft
from torch import Tensor
from torchvision.models._api import register_model, WeightsEnum
from torchvision.models._utils import handle_legacy_interface
from torchvision.models.optical_flow._utils import grid_sample, make_coords_grid, upsample_flow
from torchvision.models.optical_flow.raft import FlowHead, MotionEncoder, ResidualBlock
from torchvision.ops import Conv2dNormActivation
......@@ -618,6 +619,7 @@ class Raft_Stereo_Base_Weights(WeightsEnum):
@register_model()
@handle_legacy_interface(weights=("pretrained", None))
def raft_stereo_realtime(
    *, weights: Optional[Raft_Stereo_Realtime_Weights] = None, progress=True, **kwargs
) -> RaftStereo:
......@@ -678,6 +680,7 @@ def raft_stereo_realtime(
@register_model()
@handle_legacy_interface(weights=("pretrained", None))
def raft_stereo_base(*, weights: Optional[Raft_Stereo_Base_Weights] = None, progress=True, **kwargs) -> RaftStereo:
"""RAFT-Stereo model from
`RAFT-Stereo: Multilevel Recurrent Field Transforms for Stereo Matching <https://arxiv.org/abs/2109.07547>`_.
......