Unverified commit 385a44f8 authored by YosuaMichael, committed by GitHub

Remove publication_year and interpolation meta (#5848)



* Remove publication_year and interpolation meta

* Add types to _COMMON_META and _COMMON_SWAG_META to prevent errors from the mypy check

* Remove test checks for the interpolation and publication_year meta
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent 7d83be5e
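
The second bullet above refers to annotating the shared meta dictionaries so that mypy does not infer an overly narrow value type once the InterpolationMode entries are removed. A minimal sketch of what such an annotation could look like, assuming a Dict[str, Any] annotation (the exact annotation used in the commit is not shown in this excerpt):

from typing import Any, Dict

# Hedged illustration: annotating the shared meta dict as Dict[str, Any] keeps
# mypy from inferring a narrower value type from the remaining entries and then
# rejecting entries of other types added later in per-weight meta dicts.
_COMMON_META: Dict[str, Any] = {
    "task": "image_classification",
    "size": (224, 224),
    "min_size": (1, 1),
}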
@@ -81,7 +81,7 @@ def test_naming_conventions(model_fn):
 def test_schema_meta_validation(model_fn):
     classification_fields = ["size", "categories", "acc@1", "acc@5", "min_size"]
     defaults = {
-        "all": ["task", "architecture", "publication_year", "interpolation", "recipe", "num_params"],
+        "all": ["task", "architecture", "recipe", "num_params"],
         "models": classification_fields,
         "detection": ["categories", "map"],
         "quantization": classification_fields + ["backend", "quantization", "unquantized"],
......
@@ -4,7 +4,7 @@ from typing import Any, Optional
 import torch
 import torch.nn as nn
-from ..transforms._presets import ImageClassification, InterpolationMode
+from ..transforms._presets import ImageClassification
 from ..utils import _log_api_usage_once
 from ._api import WeightsEnum, Weights
 from ._meta import _IMAGENET_CATEGORIES
@@ -59,12 +59,10 @@ class AlexNet_Weights(WeightsEnum):
         meta={
             "task": "image_classification",
             "architecture": "AlexNet",
-            "publication_year": 2012,
             "num_params": 61100840,
             "size": (224, 224),
             "min_size": (63, 63),
             "categories": _IMAGENET_CATEGORIES,
-            "interpolation": InterpolationMode.BILINEAR,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
             "acc@1": 56.522,
             "acc@5": 79.066,
......
@@ -7,7 +7,7 @@ from torch.nn import functional as F
 from ..ops.misc import Conv2dNormActivation
 from ..ops.stochastic_depth import StochasticDepth
-from ..transforms._presets import ImageClassification, InterpolationMode
+from ..transforms._presets import ImageClassification
 from ..utils import _log_api_usage_once
 from ._api import WeightsEnum, Weights
 from ._meta import _IMAGENET_CATEGORIES
@@ -206,11 +206,9 @@ def _convnext(
 _COMMON_META = {
     "task": "image_classification",
     "architecture": "ConvNeXt",
-    "publication_year": 2022,
     "size": (224, 224),
     "min_size": (32, 32),
     "categories": _IMAGENET_CATEGORIES,
-    "interpolation": InterpolationMode.BILINEAR,
     "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#convnext",
 }
......
@@ -9,7 +9,7 @@ import torch.nn.functional as F
 import torch.utils.checkpoint as cp
 from torch import Tensor
-from ..transforms._presets import ImageClassification, InterpolationMode
+from ..transforms._presets import ImageClassification
 from ..utils import _log_api_usage_once
 from ._api import WeightsEnum, Weights
 from ._meta import _IMAGENET_CATEGORIES
@@ -268,11 +268,9 @@ def _densenet(
 _COMMON_META = {
     "task": "image_classification",
     "architecture": "DenseNet",
-    "publication_year": 2016,
     "size": (224, 224),
     "min_size": (29, 29),
     "categories": _IMAGENET_CATEGORIES,
-    "interpolation": InterpolationMode.BILINEAR,
     "recipe": "https://github.com/pytorch/vision/pull/116",
 }
......
@@ -6,7 +6,7 @@ from torch import nn
 from torchvision.ops import MultiScaleRoIAlign
 from ...ops import misc as misc_nn_ops
-from ...transforms._presets import ObjectDetection, InterpolationMode
+from ...transforms._presets import ObjectDetection
 from .._api import WeightsEnum, Weights
 from .._meta import _COCO_CATEGORIES
 from .._utils import handle_legacy_interface, _ovewrite_value_param
@@ -372,9 +372,7 @@ class FastRCNNPredictor(nn.Module):
 _COMMON_META = {
     "task": "image_object_detection",
     "architecture": "FasterRCNN",
-    "publication_year": 2015,
     "categories": _COCO_CATEGORIES,
-    "interpolation": InterpolationMode.BILINEAR,
 }
@@ -398,7 +396,6 @@ class FasterRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
         transforms=ObjectDetection,
         meta={
             **_COMMON_META,
-            "publication_year": 2021,
             "num_params": 43712278,
             "recipe": "https://github.com/pytorch/vision/pull/5763",
             "map": 46.7,
......
@@ -11,7 +11,7 @@ from ...ops import sigmoid_focal_loss, generalized_box_iou_loss
 from ...ops import boxes as box_ops
 from ...ops import misc as misc_nn_ops
 from ...ops.feature_pyramid_network import LastLevelP6P7
-from ...transforms._presets import ObjectDetection, InterpolationMode
+from ...transforms._presets import ObjectDetection
 from ...utils import _log_api_usage_once
 from .._api import WeightsEnum, Weights
 from .._meta import _COCO_CATEGORIES
@@ -653,10 +653,8 @@ class FCOS_ResNet50_FPN_Weights(WeightsEnum):
         meta={
             "task": "image_object_detection",
             "architecture": "FCOS",
-            "publication_year": 2019,
             "num_params": 32269600,
             "categories": _COCO_CATEGORIES,
-            "interpolation": InterpolationMode.BILINEAR,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#fcos-resnet-50-fpn",
             "map": 39.2,
         },
......
@@ -5,7 +5,7 @@ from torch import nn
 from torchvision.ops import MultiScaleRoIAlign
 from ...ops import misc as misc_nn_ops
-from ...transforms._presets import ObjectDetection, InterpolationMode
+from ...transforms._presets import ObjectDetection
 from .._api import WeightsEnum, Weights
 from .._meta import _COCO_PERSON_CATEGORIES, _COCO_PERSON_KEYPOINT_NAMES
 from .._utils import handle_legacy_interface, _ovewrite_value_param
@@ -310,10 +310,8 @@ class KeypointRCNNPredictor(nn.Module):
 _COMMON_META = {
     "task": "image_object_detection",
     "architecture": "KeypointRCNN",
-    "publication_year": 2017,
     "categories": _COCO_PERSON_CATEGORIES,
     "keypoint_names": _COCO_PERSON_KEYPOINT_NAMES,
-    "interpolation": InterpolationMode.BILINEAR,
 }
......
@@ -5,7 +5,7 @@ from torch import nn
 from torchvision.ops import MultiScaleRoIAlign
 from ...ops import misc as misc_nn_ops
-from ...transforms._presets import ObjectDetection, InterpolationMode
+from ...transforms._presets import ObjectDetection
 from .._api import WeightsEnum, Weights
 from .._meta import _COCO_CATEGORIES
 from .._utils import handle_legacy_interface, _ovewrite_value_param
@@ -354,7 +354,6 @@ _COMMON_META = {
     "task": "image_object_detection",
     "architecture": "MaskRCNN",
     "categories": _COCO_CATEGORIES,
-    "interpolation": InterpolationMode.BILINEAR,
 }
@@ -364,7 +363,6 @@ class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum):
         transforms=ObjectDetection,
         meta={
             **_COMMON_META,
-            "publication_year": 2017,
             "num_params": 44401393,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#mask-r-cnn",
             "map": 37.9,
@@ -380,7 +378,6 @@ class MaskRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
         transforms=ObjectDetection,
         meta={
             **_COMMON_META,
-            "publication_year": 2021,
             "num_params": 46359409,
             "recipe": "https://github.com/pytorch/vision/pull/5773",
             "map": 47.4,
......
@@ -11,7 +11,7 @@ from ...ops import sigmoid_focal_loss
 from ...ops import boxes as box_ops
 from ...ops import misc as misc_nn_ops
 from ...ops.feature_pyramid_network import LastLevelP6P7
-from ...transforms._presets import ObjectDetection, InterpolationMode
+from ...transforms._presets import ObjectDetection
 from ...utils import _log_api_usage_once
 from .._api import WeightsEnum, Weights
 from .._meta import _COCO_CATEGORIES
@@ -677,7 +677,6 @@ _COMMON_META = {
     "task": "image_object_detection",
     "architecture": "RetinaNet",
     "categories": _COCO_CATEGORIES,
-    "interpolation": InterpolationMode.BILINEAR,
 }
@@ -687,7 +686,6 @@ class RetinaNet_ResNet50_FPN_Weights(WeightsEnum):
         transforms=ObjectDetection,
         meta={
             **_COMMON_META,
-            "publication_year": 2017,
             "num_params": 34014999,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#retinanet",
             "map": 36.4,
@@ -702,7 +700,6 @@ class RetinaNet_ResNet50_FPN_V2_Weights(WeightsEnum):
         transforms=ObjectDetection,
         meta={
             **_COMMON_META,
-            "publication_year": 2019,
             "num_params": 38198935,
             "recipe": "https://github.com/pytorch/vision/pull/5756",
             "map": 41.5,
......
@@ -7,7 +7,7 @@ import torch.nn.functional as F
 from torch import nn, Tensor
 from ...ops import boxes as box_ops
-from ...transforms._presets import ObjectDetection, InterpolationMode
+from ...transforms._presets import ObjectDetection
 from ...utils import _log_api_usage_once
 from .._api import WeightsEnum, Weights
 from .._meta import _COCO_CATEGORIES
@@ -32,11 +32,9 @@ class SSD300_VGG16_Weights(WeightsEnum):
         meta={
             "task": "image_object_detection",
             "architecture": "SSD",
-            "publication_year": 2015,
             "num_params": 35641826,
             "size": (300, 300),
             "categories": _COCO_CATEGORIES,
-            "interpolation": InterpolationMode.BILINEAR,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#ssd300-vgg16",
             "map": 25.1,
         },
......
@@ -7,7 +7,7 @@ import torch
 from torch import nn, Tensor
 from ...ops.misc import Conv2dNormActivation
-from ...transforms._presets import ObjectDetection, InterpolationMode
+from ...transforms._presets import ObjectDetection
 from ...utils import _log_api_usage_once
 from .. import mobilenet
 from .._api import WeightsEnum, Weights
@@ -191,11 +191,9 @@ class SSDLite320_MobileNet_V3_Large_Weights(WeightsEnum):
         meta={
             "task": "image_object_detection",
             "architecture": "SSDLite",
-            "publication_year": 2018,
             "num_params": 3440060,
             "size": (320, 320),
             "categories": _COCO_CATEGORIES,
-            "interpolation": InterpolationMode.BILINEAR,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#ssdlite320-mobilenetv3-large",
             "map": 21.3,
         },
......
@@ -439,8 +439,6 @@ _COMMON_META = {
 _COMMON_META_V1 = {
     **_COMMON_META,
     "architecture": "EfficientNet",
-    "publication_year": 2019,
-    "interpolation": InterpolationMode.BICUBIC,
     "min_size": (1, 1),
 }
@@ -448,8 +446,6 @@ _COMMON_META_V1 = {
 _COMMON_META_V2 = {
     **_COMMON_META,
     "architecture": "EfficientNetV2",
-    "publication_year": 2021,
-    "interpolation": InterpolationMode.BILINEAR,
     "min_size": (33, 33),
 }
@@ -494,7 +490,6 @@ class EfficientNet_B1_Weights(WeightsEnum):
             **_COMMON_META_V1,
             "num_params": 7794184,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-lr-wd-crop-tuning",
-            "interpolation": InterpolationMode.BILINEAR,
             "size": (240, 240),
             "acc@1": 79.838,
             "acc@5": 94.934,
......
@@ -8,7 +8,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 from torch import Tensor
-from ..transforms._presets import ImageClassification, InterpolationMode
+from ..transforms._presets import ImageClassification
 from ..utils import _log_api_usage_once
 from ._api import WeightsEnum, Weights
 from ._meta import _IMAGENET_CATEGORIES
@@ -282,12 +282,10 @@ class GoogLeNet_Weights(WeightsEnum):
         meta={
             "task": "image_classification",
             "architecture": "GoogLeNet",
-            "publication_year": 2014,
             "num_params": 6624904,
             "size": (224, 224),
             "min_size": (15, 15),
             "categories": _IMAGENET_CATEGORIES,
-            "interpolation": InterpolationMode.BILINEAR,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#googlenet",
             "acc@1": 69.778,
             "acc@5": 89.530,
......
@@ -7,7 +7,7 @@ import torch
 import torch.nn.functional as F
 from torch import nn, Tensor
-from ..transforms._presets import ImageClassification, InterpolationMode
+from ..transforms._presets import ImageClassification
 from ..utils import _log_api_usage_once
 from ._api import WeightsEnum, Weights
 from ._meta import _IMAGENET_CATEGORIES
@@ -414,12 +414,10 @@ class Inception_V3_Weights(WeightsEnum):
         meta={
             "task": "image_classification",
             "architecture": "InceptionV3",
-            "publication_year": 2015,
             "num_params": 27161264,
             "size": (299, 299),
             "min_size": (75, 75),
             "categories": _IMAGENET_CATEGORIES,
-            "interpolation": InterpolationMode.BILINEAR,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#inception-v3",
             "acc@1": 77.294,
             "acc@5": 93.450,
......
@@ -6,7 +6,7 @@ import torch
 import torch.nn as nn
 from torch import Tensor
-from ..transforms._presets import ImageClassification, InterpolationMode
+from ..transforms._presets import ImageClassification
 from ..utils import _log_api_usage_once
 from ._api import WeightsEnum, Weights
 from ._meta import _IMAGENET_CATEGORIES
@@ -214,11 +214,9 @@ class MNASNet(torch.nn.Module):
 _COMMON_META = {
     "task": "image_classification",
     "architecture": "MNASNet",
-    "publication_year": 2018,
     "size": (224, 224),
     "min_size": (1, 1),
     "categories": _IMAGENET_CATEGORIES,
-    "interpolation": InterpolationMode.BILINEAR,
     "recipe": "https://github.com/1e100/mnasnet_trainer",
 }
......
@@ -7,7 +7,7 @@ from torch import Tensor
 from torch import nn
 from ..ops.misc import Conv2dNormActivation
-from ..transforms._presets import ImageClassification, InterpolationMode
+from ..transforms._presets import ImageClassification
 from ..utils import _log_api_usage_once
 from ._api import WeightsEnum, Weights
 from ._meta import _IMAGENET_CATEGORIES
@@ -197,12 +197,10 @@ class MobileNetV2(nn.Module):
 _COMMON_META = {
     "task": "image_classification",
     "architecture": "MobileNetV2",
-    "publication_year": 2018,
     "num_params": 3504872,
     "size": (224, 224),
     "min_size": (1, 1),
     "categories": _IMAGENET_CATEGORIES,
-    "interpolation": InterpolationMode.BILINEAR,
 }
......
@@ -6,7 +6,7 @@ import torch
 from torch import nn, Tensor
 from ..ops.misc import Conv2dNormActivation, SqueezeExcitation as SElayer
-from ..transforms._presets import ImageClassification, InterpolationMode
+from ..transforms._presets import ImageClassification
 from ..utils import _log_api_usage_once
 from ._api import WeightsEnum, Weights
 from ._meta import _IMAGENET_CATEGORIES
@@ -306,11 +306,9 @@ def _mobilenet_v3(
 _COMMON_META = {
     "task": "image_classification",
     "architecture": "MobileNetV3",
-    "publication_year": 2019,
     "size": (224, 224),
     "min_size": (1, 1),
     "categories": _IMAGENET_CATEGORIES,
-    "interpolation": InterpolationMode.BILINEAR,
 }
......
@@ -8,7 +8,7 @@ from torch.nn.modules.batchnorm import BatchNorm2d
 from torch.nn.modules.instancenorm import InstanceNorm2d
 from torchvision.ops import Conv2dNormActivation
-from ...transforms._presets import OpticalFlow, InterpolationMode
+from ...transforms._presets import OpticalFlow
 from ...utils import _log_api_usage_once
 from .._api import Weights, WeightsEnum
 from .._utils import handle_legacy_interface
@@ -514,8 +514,6 @@ class RAFT(nn.Module):
 _COMMON_META = {
     "task": "optical_flow",
     "architecture": "RAFT",
-    "publication_year": 2020,
-    "interpolation": InterpolationMode.BILINEAR,
 }
......
@@ -7,7 +7,7 @@ import torch.nn as nn
 from torch import Tensor
 from torch.nn import functional as F
-from ...transforms._presets import ImageClassification, InterpolationMode
+from ...transforms._presets import ImageClassification
 from .._api import WeightsEnum, Weights
 from .._meta import _IMAGENET_CATEGORIES
 from .._utils import handle_legacy_interface, _ovewrite_named_param
@@ -113,12 +113,10 @@ class GoogLeNet_QuantizedWeights(WeightsEnum):
         meta={
             "task": "image_classification",
             "architecture": "GoogLeNet",
-            "publication_year": 2014,
             "num_params": 6624904,
             "size": (224, 224),
             "min_size": (15, 15),
             "categories": _IMAGENET_CATEGORIES,
-            "interpolation": InterpolationMode.BILINEAR,
             "backend": "fbgemm",
             "quantization": "Post Training Quantization",
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
......
@@ -9,7 +9,7 @@ from torch import Tensor
 from torchvision.models import inception as inception_module
 from torchvision.models.inception import InceptionOutputs, Inception_V3_Weights
-from ...transforms._presets import ImageClassification, InterpolationMode
+from ...transforms._presets import ImageClassification
 from .._api import WeightsEnum, Weights
 from .._meta import _IMAGENET_CATEGORIES
 from .._utils import handle_legacy_interface, _ovewrite_named_param
@@ -179,12 +179,10 @@ class Inception_V3_QuantizedWeights(WeightsEnum):
         meta={
             "task": "image_classification",
             "architecture": "InceptionV3",
-            "publication_year": 2015,
             "num_params": 27161264,
             "size": (299, 299),
             "min_size": (75, 75),
             "categories": _IMAGENET_CATEGORIES,
-            "interpolation": InterpolationMode.BILINEAR,
             "backend": "fbgemm",
             "quantization": "Post Training Quantization",
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
......
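
After this change, weight metadata no longer carries "publication_year" or "interpolation"; the interpolation used for preprocessing comes from the weights' preset transforms rather than from a meta entry. A hedged usage sketch against the multi-weight API, with values taken from the AlexNet hunk above (the transforms() call assumes the standard Weights preset behavior):

from torchvision.models import AlexNet_Weights

weights = AlexNet_Weights.IMAGENET1K_V1
print(weights.meta["num_params"])  # 61100840, still exposed via meta
print(weights.meta["acc@1"])       # 56.522
# The removed keys are simply absent from meta after this commit:
print("interpolation" in weights.meta)      # False
print("publication_year" in weights.meta)   # False
preprocess = weights.transforms()  # preset encapsulates resize/interpolation settings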