Unverified commit c27bed45, authored by Fedor, committed by GitHub
Browse files

Adding min_size to classification and video models (#5223)



* Adding min_size as a required field.

* Adding min_size to classification models (quantized and not)

* Adding min_size to video models meta.

* Moving min_size to _COMMON_META

* Fixing extra line
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent e047623a
......@@ -97,7 +97,7 @@ def test_naming_conventions(model_fn):
)
@run_if_test_with_prototype
def test_schema_meta_validation(model_fn):
classification_fields = ["size", "categories", "acc@1", "acc@5"]
classification_fields = ["size", "categories", "acc@1", "acc@5", "min_size"]
defaults = {
"all": ["task", "architecture", "publication_year", "interpolation", "recipe", "num_params"],
"models": classification_fields,
......
......@@ -23,6 +23,7 @@ class AlexNet_Weights(WeightsEnum):
"publication_year": 2012,
"num_params": 61100840,
"size": (224, 224),
"min_size": (63, 63),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
......
......@@ -68,6 +68,7 @@ _COMMON_META = {
"architecture": "DenseNet",
"publication_year": 2016,
"size": (224, 224),
"min_size": (29, 29),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"recipe": "https://github.com/pytorch/vision/pull/116",
......
......@@ -66,6 +66,7 @@ _COMMON_META = {
"task": "image_classification",
"architecture": "EfficientNet",
"publication_year": 2019,
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BICUBIC,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#efficientnet",
......
......@@ -24,6 +24,7 @@ class GoogLeNet_Weights(WeightsEnum):
"publication_year": 2014,
"num_params": 6624904,
"size": (224, 224),
"min_size": (15, 15),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#googlenet",
......
......@@ -23,6 +23,7 @@ class Inception_V3_Weights(WeightsEnum):
"publication_year": 2015,
"num_params": 27161264,
"size": (299, 299),
"min_size": (75, 75),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#inception-v3",
......
......@@ -28,6 +28,7 @@ _COMMON_META = {
"architecture": "MNASNet",
"publication_year": 2018,
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"recipe": "https://github.com/1e100/mnasnet_trainer",
......
......@@ -23,6 +23,7 @@ class MobileNet_V2_Weights(WeightsEnum):
"publication_year": 2018,
"num_params": 3504872,
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2",
......
......@@ -42,6 +42,7 @@ _COMMON_META = {
"architecture": "MobileNetV3",
"publication_year": 2019,
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
}
......
......@@ -33,6 +33,7 @@ class GoogLeNet_QuantizedWeights(WeightsEnum):
"publication_year": 2014,
"num_params": 6624904,
"size": (224, 224),
"min_size": (15, 15),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"backend": "fbgemm",
......
......@@ -32,6 +32,7 @@ class Inception_V3_QuantizedWeights(WeightsEnum):
"publication_year": 2015,
"num_params": 27161264,
"size": (299, 299),
"min_size": (75, 75),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"backend": "fbgemm",
......
......@@ -33,6 +33,7 @@ class MobileNet_V2_QuantizedWeights(WeightsEnum):
"publication_year": 2018,
"num_params": 3504872,
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"backend": "qnnpack",
......
......@@ -66,6 +66,7 @@ class MobileNet_V3_Large_QuantizedWeights(WeightsEnum):
"publication_year": 2019,
"num_params": 5483032,
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"backend": "qnnpack",
......
......@@ -56,6 +56,7 @@ def _resnet(
_COMMON_META = {
"task": "image_classification",
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"backend": "fbgemm",
......
......@@ -55,6 +55,7 @@ _COMMON_META = {
"architecture": "ShuffleNetV2",
"publication_year": 2018,
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"backend": "fbgemm",
......
......@@ -50,6 +50,7 @@ _COMMON_META = {
"architecture": "RegNet",
"publication_year": 2020,
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
}
......
......@@ -54,6 +54,7 @@ def _resnet(
_COMMON_META = {
"task": "image_classification",
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
}
......
......@@ -45,6 +45,7 @@ _COMMON_META = {
"architecture": "ShuffleNetV2",
"publication_year": 2018,
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"recipe": "https://github.com/barrh/Shufflenet-v2-Pytorch/tree/v0.1.0",
......
......@@ -30,6 +30,7 @@ class SqueezeNet1_0_Weights(WeightsEnum):
transforms=partial(ImageNetEval, crop_size=224),
meta={
**_COMMON_META,
"min_size": (21, 21),
"num_params": 1248424,
"acc@1": 58.092,
"acc@5": 80.420,
......@@ -44,6 +45,7 @@ class SqueezeNet1_1_Weights(WeightsEnum):
transforms=partial(ImageNetEval, crop_size=224),
meta={
**_COMMON_META,
"min_size": (17, 17),
"num_params": 1235496,
"acc@1": 58.178,
"acc@5": 80.624,
......
......@@ -45,6 +45,7 @@ _COMMON_META = {
"architecture": "VGG",
"publication_year": 2014,
"size": (224, 224),
"min_size": (32, 32),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment