Unverified commit b5aa0915 authored by Vasilis Vryniotis, committed by GitHub

Improved meta-data for models (#5170)

* Improved meta-data for models.

* Addressing comments from code-review.

* Add parameter count.

* Fix linter.
Parent commit: 5dc61cb0
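Each of the new keys ("task", "architecture", "publication_year", "num_params") is surfaced through the `meta` dictionary of the corresponding weights enum, alongside the pre-existing entries such as "acc@1" and "recipe". A minimal sketch of reading them — the import path is an assumption about where the prototype weight enums live; the enum member and values below come from this diff:

```python
# Hedged sketch -- the exact import path for the prototype weights API is assumed.
from torchvision.prototype.models import ResNet50_Weights  # assumed module path

weights = ResNet50_Weights.ImageNet1K_V1
meta = weights.meta

# Keys introduced or standardized by this commit:
print(meta["task"])              # "image_classification"
print(meta["architecture"])      # "ResNet"
print(meta["publication_year"])  # 2015
print(meta["num_params"])        # 25557032

# Pre-existing keys remain available next to the new ones:
print(meta["acc@1"], meta["acc@5"])  # 76.130 92.862 for this checkpoint
```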
@@ -61,6 +61,10 @@ class MobileNet_V3_Large_QuantizedWeights(WeightsEnum):
         url="https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth",
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
+            "task": "image_classification",
+            "architecture": "MobileNetV3",
+            "publication_year": 2019,
+            "num_params": 5483032,
             "size": (224, 224),
             "categories": _IMAGENET_CATEGORIES,
             "interpolation": InterpolationMode.BILINEAR,
...
@@ -54,6 +54,7 @@ def _resnet(
 _COMMON_META = {
+    "task": "image_classification",
     "size": (224, 224),
     "categories": _IMAGENET_CATEGORIES,
     "interpolation": InterpolationMode.BILINEAR,
@@ -69,6 +70,9 @@ class ResNet18_QuantizedWeights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "ResNet",
+            "publication_year": 2015,
+            "num_params": 11689512,
             "unquantized": ResNet18_Weights.ImageNet1K_V1,
             "acc@1": 69.494,
             "acc@5": 88.882,
@@ -83,6 +87,9 @@ class ResNet50_QuantizedWeights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "ResNet",
+            "publication_year": 2015,
+            "num_params": 25557032,
             "unquantized": ResNet50_Weights.ImageNet1K_V1,
             "acc@1": 75.920,
             "acc@5": 92.814,
@@ -93,6 +100,9 @@ class ResNet50_QuantizedWeights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "architecture": "ResNet",
+            "publication_year": 2015,
+            "num_params": 25557032,
             "unquantized": ResNet50_Weights.ImageNet1K_V2,
             "acc@1": 80.282,
             "acc@5": 94.976,
@@ -107,6 +117,9 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "ResNeXt",
+            "publication_year": 2016,
+            "num_params": 88791336,
             "unquantized": ResNeXt101_32X8D_Weights.ImageNet1K_V1,
             "acc@1": 78.986,
             "acc@5": 94.480,
@@ -117,6 +130,9 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "architecture": "ResNeXt",
+            "publication_year": 2016,
+            "num_params": 88791336,
             "unquantized": ResNeXt101_32X8D_Weights.ImageNet1K_V2,
             "acc@1": 82.574,
             "acc@5": 96.132,
...
@@ -51,6 +51,9 @@ def _shufflenetv2(
 _COMMON_META = {
+    "task": "image_classification",
+    "architecture": "ShuffleNetV2",
+    "publication_year": 2018,
     "size": (224, 224),
     "categories": _IMAGENET_CATEGORIES,
     "interpolation": InterpolationMode.BILINEAR,
@@ -66,6 +69,7 @@ class ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 1366792,
             "unquantized": ShuffleNet_V2_X0_5_Weights.ImageNet1K_V1,
             "acc@1": 57.972,
             "acc@5": 79.780,
@@ -80,6 +84,7 @@ class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 2278604,
             "unquantized": ShuffleNet_V2_X1_0_Weights.ImageNet1K_V1,
             "acc@1": 68.360,
             "acc@5": 87.582,
...
@@ -43,7 +43,14 @@ __all__ = [
     "regnet_x_32gf",
 ]
-_COMMON_META = {"size": (224, 224), "categories": _IMAGENET_CATEGORIES, "interpolation": InterpolationMode.BILINEAR}
+_COMMON_META = {
+    "task": "image_classification",
+    "architecture": "RegNet",
+    "publication_year": 2020,
+    "size": (224, 224),
+    "categories": _IMAGENET_CATEGORIES,
+    "interpolation": InterpolationMode.BILINEAR,
+}
 def _regnet(
@@ -70,6 +77,7 @@ class RegNet_Y_400MF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 4344144,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
             "acc@1": 74.046,
             "acc@5": 91.716,
@@ -80,6 +88,7 @@ class RegNet_Y_400MF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 4344144,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 75.804,
             "acc@5": 92.742,
@@ -94,6 +103,7 @@ class RegNet_Y_800MF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 6432512,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
             "acc@1": 76.420,
             "acc@5": 93.136,
@@ -104,6 +114,7 @@ class RegNet_Y_800MF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 6432512,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 78.828,
             "acc@5": 94.502,
@@ -118,6 +129,7 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 11202430,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
             "acc@1": 77.950,
             "acc@5": 93.966,
@@ -128,6 +140,7 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 11202430,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 80.876,
             "acc@5": 95.444,
@@ -142,6 +155,7 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 19436338,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
             "acc@1": 78.948,
             "acc@5": 94.576,
@@ -152,6 +166,7 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 19436338,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 81.982,
             "acc@5": 95.972,
@@ -166,6 +181,7 @@ class RegNet_Y_8GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 39381472,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
             "acc@1": 80.032,
             "acc@5": 95.048,
@@ -176,6 +192,7 @@ class RegNet_Y_8GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 39381472,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 82.828,
             "acc@5": 96.330,
@@ -190,6 +207,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 83590140,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
             "acc@1": 80.424,
             "acc@5": 95.240,
@@ -200,6 +218,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 83590140,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 82.886,
             "acc@5": 96.328,
@@ -214,6 +233,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 145046770,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
             "acc@1": 80.878,
             "acc@5": 95.340,
@@ -224,6 +244,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 145046770,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 83.368,
             "acc@5": 96.498,
@@ -238,6 +259,7 @@ class RegNet_X_400MF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 5495976,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
             "acc@1": 72.834,
             "acc@5": 90.950,
@@ -248,6 +270,7 @@ class RegNet_X_400MF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 5495976,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
             "acc@1": 74.864,
             "acc@5": 92.322,
@@ -262,6 +285,7 @@ class RegNet_X_800MF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 7259656,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
             "acc@1": 75.212,
             "acc@5": 92.348,
@@ -272,6 +296,7 @@ class RegNet_X_800MF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 7259656,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
             "acc@1": 77.522,
             "acc@5": 93.826,
@@ -286,6 +311,7 @@ class RegNet_X_1_6GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 9190136,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
             "acc@1": 77.040,
             "acc@5": 93.440,
@@ -296,6 +322,7 @@ class RegNet_X_1_6GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 9190136,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
             "acc@1": 79.668,
             "acc@5": 94.922,
@@ -310,6 +337,7 @@ class RegNet_X_3_2GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 15296552,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
             "acc@1": 78.364,
             "acc@5": 93.992,
@@ -320,6 +348,7 @@ class RegNet_X_3_2GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 15296552,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 81.196,
             "acc@5": 95.430,
@@ -334,6 +363,7 @@ class RegNet_X_8GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 39572648,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
             "acc@1": 79.344,
             "acc@5": 94.686,
@@ -344,6 +374,7 @@ class RegNet_X_8GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 39572648,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 81.682,
             "acc@5": 95.678,
@@ -358,6 +389,7 @@ class RegNet_X_16GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 54278536,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
             "acc@1": 80.058,
             "acc@5": 94.944,
@@ -368,6 +400,7 @@ class RegNet_X_16GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 54278536,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 82.716,
             "acc@5": 96.196,
@@ -382,6 +415,7 @@ class RegNet_X_32GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 107811560,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
             "acc@1": 80.622,
             "acc@5": 95.248,
@@ -392,6 +426,7 @@ class RegNet_X_32GF_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "num_params": 107811560,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 83.014,
             "acc@5": 96.288,
...
@@ -51,7 +51,12 @@ def _resnet(
     return model
-_COMMON_META = {"size": (224, 224), "categories": _IMAGENET_CATEGORIES, "interpolation": InterpolationMode.BILINEAR}
+_COMMON_META = {
+    "task": "image_classification",
+    "size": (224, 224),
+    "categories": _IMAGENET_CATEGORIES,
+    "interpolation": InterpolationMode.BILINEAR,
+}
 class ResNet18_Weights(WeightsEnum):
@@ -60,6 +65,9 @@ class ResNet18_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "ResNet",
+            "publication_year": 2015,
+            "num_params": 11689512,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
             "acc@1": 69.758,
             "acc@5": 89.078,
@@ -74,6 +82,9 @@ class ResNet34_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "ResNet",
+            "publication_year": 2015,
+            "num_params": 21797672,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
             "acc@1": 73.314,
             "acc@5": 91.420,
@@ -88,6 +99,9 @@ class ResNet50_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "ResNet",
+            "publication_year": 2015,
+            "num_params": 25557032,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
             "acc@1": 76.130,
             "acc@5": 92.862,
@@ -98,6 +112,9 @@ class ResNet50_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "architecture": "ResNet",
+            "publication_year": 2015,
+            "num_params": 25557032,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
             "acc@1": 80.674,
             "acc@5": 95.166,
@@ -112,6 +129,9 @@ class ResNet101_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "ResNet",
+            "publication_year": 2015,
+            "num_params": 44549160,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
             "acc@1": 77.374,
             "acc@5": 93.546,
@@ -122,6 +142,9 @@ class ResNet101_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "architecture": "ResNet",
+            "publication_year": 2015,
+            "num_params": 44549160,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 81.886,
             "acc@5": 95.780,
@@ -136,6 +159,9 @@ class ResNet152_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "ResNet",
+            "publication_year": 2015,
+            "num_params": 60192808,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
             "acc@1": 78.312,
             "acc@5": 94.046,
@@ -146,6 +172,9 @@ class ResNet152_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "architecture": "ResNet",
+            "publication_year": 2015,
+            "num_params": 60192808,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 82.284,
             "acc@5": 96.002,
@@ -160,6 +189,9 @@ class ResNeXt50_32X4D_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "ResNeXt",
+            "publication_year": 2016,
+            "num_params": 25028904,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
             "acc@1": 77.618,
             "acc@5": 93.698,
@@ -170,6 +202,9 @@ class ResNeXt50_32X4D_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "architecture": "ResNeXt",
+            "publication_year": 2016,
+            "num_params": 25028904,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 81.198,
             "acc@5": 95.340,
@@ -184,6 +219,9 @@ class ResNeXt101_32X8D_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "ResNeXt",
+            "publication_year": 2016,
+            "num_params": 88791336,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
             "acc@1": 79.312,
             "acc@5": 94.526,
@@ -194,6 +232,9 @@ class ResNeXt101_32X8D_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "architecture": "ResNeXt",
+            "publication_year": 2016,
+            "num_params": 88791336,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
             "acc@1": 82.834,
             "acc@5": 96.228,
@@ -208,6 +249,9 @@ class Wide_ResNet50_2_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "WideResNet",
+            "publication_year": 2016,
+            "num_params": 68883240,
             "recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
             "acc@1": 78.468,
             "acc@5": 94.086,
@@ -218,6 +262,9 @@ class Wide_ResNet50_2_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "architecture": "WideResNet",
+            "publication_year": 2016,
+            "num_params": 68883240,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
             "acc@1": 81.602,
             "acc@5": 95.758,
@@ -232,6 +279,9 @@ class Wide_ResNet101_2_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "architecture": "WideResNet",
+            "publication_year": 2016,
+            "num_params": 126886696,
             "recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
             "acc@1": 78.848,
             "acc@5": 94.284,
@@ -242,6 +292,9 @@ class Wide_ResNet101_2_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
+            "architecture": "WideResNet",
+            "publication_year": 2016,
+            "num_params": 126886696,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
             "acc@1": 82.510,
             "acc@5": 96.020,
...
@@ -25,6 +25,9 @@ __all__ = [
 _COMMON_META = {
+    "task": "image_semantic_segmentation",
+    "architecture": "DeepLabV3",
+    "publication_year": 2017,
     "categories": _VOC_CATEGORIES,
     "interpolation": InterpolationMode.BILINEAR,
 }
@@ -36,6 +39,7 @@ class DeepLabV3_ResNet50_Weights(WeightsEnum):
         transforms=partial(VocEval, resize_size=520),
         meta={
             **_COMMON_META,
+            "num_params": 42004074,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet50",
             "mIoU": 66.4,
             "acc": 92.4,
@@ -50,6 +54,7 @@ class DeepLabV3_ResNet101_Weights(WeightsEnum):
         transforms=partial(VocEval, resize_size=520),
         meta={
             **_COMMON_META,
+            "num_params": 60996202,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#fcn_resnet101",
             "mIoU": 67.4,
             "acc": 92.4,
@@ -64,6 +69,7 @@ class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum):
         transforms=partial(VocEval, resize_size=520),
         meta={
             **_COMMON_META,
+            "num_params": 11029328,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_mobilenet_v3_large",
             "mIoU": 60.3,
             "acc": 91.2,
...
@@ -15,6 +15,9 @@ __all__ = ["FCN", "FCN_ResNet50_Weights", "FCN_ResNet101_Weights", "fcn_resnet50
 _COMMON_META = {
+    "task": "image_semantic_segmentation",
+    "architecture": "FCN",
+    "publication_year": 2014,
     "categories": _VOC_CATEGORIES,
     "interpolation": InterpolationMode.BILINEAR,
 }
@@ -26,6 +29,7 @@ class FCN_ResNet50_Weights(WeightsEnum):
         transforms=partial(VocEval, resize_size=520),
         meta={
             **_COMMON_META,
+            "num_params": 35322218,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#fcn_resnet50",
             "mIoU": 60.5,
             "acc": 91.4,
@@ -40,6 +44,7 @@ class FCN_ResNet101_Weights(WeightsEnum):
         transforms=partial(VocEval, resize_size=520),
         meta={
             **_COMMON_META,
+            "num_params": 54314346,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet101",
             "mIoU": 63.7,
             "acc": 91.9,
...
@@ -19,6 +19,10 @@ class LRASPP_MobileNet_V3_Large_Weights(WeightsEnum):
         url="https://download.pytorch.org/models/lraspp_mobilenet_v3_large-d234d4ea.pth",
         transforms=partial(VocEval, resize_size=520),
         meta={
+            "task": "image_semantic_segmentation",
+            "architecture": "LRASPP",
+            "publication_year": 2019,
+            "num_params": 3221538,
             "categories": _VOC_CATEGORIES,
             "interpolation": InterpolationMode.BILINEAR,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#lraspp_mobilenet_v3_large",
...
@@ -41,6 +41,9 @@ def _shufflenetv2(
 _COMMON_META = {
+    "task": "image_classification",
+    "architecture": "ShuffleNetV2",
+    "publication_year": 2018,
     "size": (224, 224),
     "categories": _IMAGENET_CATEGORIES,
     "interpolation": InterpolationMode.BILINEAR,
@@ -54,6 +57,7 @@ class ShuffleNet_V2_X0_5_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 1366792,
             "acc@1": 69.362,
             "acc@5": 88.316,
         },
@@ -67,6 +71,7 @@ class ShuffleNet_V2_X1_0_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 2278604,
             "acc@1": 60.552,
             "acc@5": 81.746,
         },
...
@@ -14,6 +14,9 @@ __all__ = ["SqueezeNet", "SqueezeNet1_0_Weights", "SqueezeNet1_1_Weights", "sque
 _COMMON_META = {
+    "task": "image_classification",
+    "architecture": "SqueezeNet",
+    "publication_year": 2016,
     "size": (224, 224),
     "categories": _IMAGENET_CATEGORIES,
     "interpolation": InterpolationMode.BILINEAR,
@@ -27,6 +30,7 @@ class SqueezeNet1_0_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 1248424,
             "acc@1": 58.092,
             "acc@5": 80.420,
         },
@@ -40,6 +44,7 @@ class SqueezeNet1_1_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 1235496,
             "acc@1": 58.178,
             "acc@5": 80.624,
         },
...
@@ -41,6 +41,9 @@ def _vgg(cfg: str, batch_norm: bool, weights: Optional[WeightsEnum], progress: b
 _COMMON_META = {
+    "task": "image_classification",
+    "architecture": "VGG",
+    "publication_year": 2014,
     "size": (224, 224),
     "categories": _IMAGENET_CATEGORIES,
     "interpolation": InterpolationMode.BILINEAR,
@@ -54,6 +57,7 @@ class VGG11_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 132863336,
             "acc@1": 69.020,
             "acc@5": 88.628,
         },
@@ -67,6 +71,7 @@ class VGG11_BN_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 132868840,
             "acc@1": 70.370,
             "acc@5": 89.810,
         },
@@ -80,6 +85,7 @@ class VGG13_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 133047848,
             "acc@1": 69.928,
             "acc@5": 89.246,
         },
@@ -93,6 +99,7 @@ class VGG13_BN_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 133053736,
             "acc@1": 71.586,
             "acc@5": 90.374,
         },
@@ -106,6 +113,7 @@ class VGG16_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 138357544,
             "acc@1": 71.592,
             "acc@5": 90.382,
         },
@@ -119,9 +127,9 @@ class VGG16_Weights(WeightsEnum):
             ImageNetEval, crop_size=224, mean=(0.48235, 0.45882, 0.40784), std=(1.0 / 255.0, 1.0 / 255.0, 1.0 / 255.0)
         ),
         meta={
-            "size": (224, 224),
+            **_COMMON_META,
+            "num_params": 138357544,
             "categories": None,
-            "interpolation": InterpolationMode.BILINEAR,
             "recipe": "https://github.com/amdegroot/ssd.pytorch#training-ssd",
             "acc@1": float("nan"),
             "acc@5": float("nan"),
@@ -136,6 +144,7 @@ class VGG16_BN_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 138365992,
             "acc@1": 73.360,
             "acc@5": 91.516,
         },
@@ -149,6 +158,7 @@ class VGG19_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 143667240,
             "acc@1": 72.376,
             "acc@5": 90.876,
         },
@@ -162,6 +172,7 @@ class VGG19_BN_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 143678248,
             "acc@1": 74.218,
             "acc@5": 91.842,
         },
...
@@ -52,6 +52,8 @@ def _video_resnet(
 _COMMON_META = {
+    "task": "video_classification",
+    "publication_year": 2017,
     "size": (112, 112),
     "categories": _KINETICS400_CATEGORIES,
     "interpolation": InterpolationMode.BILINEAR,
@@ -65,6 +67,8 @@ class R3D_18_Weights(WeightsEnum):
         transforms=partial(Kinect400Eval, crop_size=(112, 112), resize_size=(128, 171)),
         meta={
             **_COMMON_META,
+            "architecture": "R3D",
+            "num_params": 33371472,
             "acc@1": 52.75,
             "acc@5": 75.45,
         },
@@ -78,6 +82,8 @@ class MC3_18_Weights(WeightsEnum):
         transforms=partial(Kinect400Eval, crop_size=(112, 112), resize_size=(128, 171)),
         meta={
             **_COMMON_META,
+            "architecture": "MC3",
+            "num_params": 11695440,
             "acc@1": 53.90,
             "acc@5": 76.29,
         },
@@ -91,6 +97,8 @@ class R2Plus1D_18_Weights(WeightsEnum):
         transforms=partial(Kinect400Eval, crop_size=(112, 112), resize_size=(128, 171)),
         meta={
             **_COMMON_META,
+            "architecture": "R(2+1)D",
+            "num_params": 31505325,
             "acc@1": 57.50,
             "acc@5": 78.81,
         },
...
@@ -236,6 +236,9 @@ class VisionTransformer(nn.Module):
 _COMMON_META = {
+    "task": "image_classification",
+    "architecture": "ViT",
+    "publication_year": 2020,
     "categories": _IMAGENET_CATEGORIES,
     "interpolation": InterpolationMode.BILINEAR,
 }
@@ -247,6 +250,7 @@ class ViT_B_16_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 86567656,
             "size": (224, 224),
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_b_16",
             "acc@1": 81.072,
@@ -262,6 +266,7 @@ class ViT_B_32_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 88224232,
             "size": (224, 224),
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_b_32",
             "acc@1": 75.912,
@@ -277,6 +282,7 @@ class ViT_L_16_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=242),
         meta={
             **_COMMON_META,
+            "num_params": 304326632,
             "size": (224, 224),
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_l_16",
             "acc@1": 79.662,
@@ -292,6 +298,7 @@ class ViT_L_32_Weights(WeightsEnum):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "num_params": 306535400,
             "size": (224, 224),
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_l_32",
             "acc@1": 76.972,
...
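One motivation for recording `num_params` uniformly is that model selection becomes scriptable. A small, hedged sketch under the same import-path assumption as above, using only enums and values that appear in this commit:

```python
# Hedged sketch: pick the smallest listed classifier that clears an accuracy bar.
from torchvision.prototype.models import (  # assumed module path
    RegNet_Y_400MF_Weights,
    ResNet18_Weights,
    ResNet50_Weights,
)

candidates = [
    RegNet_Y_400MF_Weights.ImageNet1K_V1,
    ResNet18_Weights.ImageNet1K_V1,
    ResNet50_Weights.ImageNet1K_V1,
]

# Filter on the reported single-crop accuracy, then minimize parameter count.
eligible = [w for w in candidates if w.meta["acc@1"] >= 74.0]
smallest = min(eligible, key=lambda w: w.meta["num_params"])
print(smallest.meta["architecture"], smallest.meta["num_params"])
# -> "RegNet" 4344144, given the metadata recorded in this diff
```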