Unverified commit 6f016dd9, authored by Vasilis Vryniotis and committed by GitHub

Restructuring metrics meta-data (#5859)

* Restructure metrics meta-data for detection, segmentation and optical flow.

* Rename acc to pixel_acc for segmentation.

* Restructure video meta-data.

* Restructure classification and quantization meta-data.

* Fix tests.

* Fix documentation.
parent c82b86d1
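
The pattern is identical across every file below: per-weight evaluation numbers move out of the top level of each meta dictionary into a nested "metrics" dictionary, and the segmentation keys are renamed (mIoU becomes miou, acc becomes pixel_acc). A minimal before/after sketch of the data shape, using values taken from the DeepLabV3-ResNet50 hunk below:

# Before this commit: metrics sat directly in meta, mixed with other fields.
meta_before = {
    "num_params": 42004074,
    "mIoU": 66.4,
    "acc": 92.4,
}

# After this commit: all evaluation metrics live under a dedicated key,
# with the segmentation names normalized to "miou" and "pixel_acc".
meta_after = {
    "num_params": 42004074,
    "metrics": {
        "miou": 66.4,
        "pixel_acc": 92.4,
    },
}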
@@ -183,8 +183,10 @@ class Inception_V3_QuantizedWeights(WeightsEnum):
             "backend": "fbgemm",
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
             "unquantized": Inception_V3_Weights.IMAGENET1K_V1,
-            "acc@1": 77.176,
-            "acc@5": 93.354,
+            "metrics": {
+                "acc@1": 77.176,
+                "acc@5": 93.354,
+            },
         },
     )
     DEFAULT = IMAGENET1K_FBGEMM_V1
......
@@ -75,8 +75,10 @@ class MobileNet_V2_QuantizedWeights(WeightsEnum):
             "backend": "qnnpack",
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv2",
             "unquantized": MobileNet_V2_Weights.IMAGENET1K_V1,
-            "acc@1": 71.658,
-            "acc@5": 90.150,
+            "metrics": {
+                "acc@1": 71.658,
+                "acc@5": 90.150,
+            },
         },
     )
     DEFAULT = IMAGENET1K_QNNPACK_V1
......
@@ -165,8 +165,10 @@ class MobileNet_V3_Large_QuantizedWeights(WeightsEnum):
             "backend": "qnnpack",
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv3",
             "unquantized": MobileNet_V3_Large_Weights.IMAGENET1K_V1,
-            "acc@1": 73.004,
-            "acc@5": 90.858,
+            "metrics": {
+                "acc@1": 73.004,
+                "acc@5": 90.858,
+            },
         },
     )
     DEFAULT = IMAGENET1K_QNNPACK_V1
......
@@ -162,8 +162,10 @@ class ResNet18_QuantizedWeights(WeightsEnum):
             **_COMMON_META,
             "num_params": 11689512,
             "unquantized": ResNet18_Weights.IMAGENET1K_V1,
-            "acc@1": 69.494,
-            "acc@5": 88.882,
+            "metrics": {
+                "acc@1": 69.494,
+                "acc@5": 88.882,
+            },
         },
     )
     DEFAULT = IMAGENET1K_FBGEMM_V1
@@ -177,8 +179,10 @@ class ResNet50_QuantizedWeights(WeightsEnum):
             **_COMMON_META,
             "num_params": 25557032,
             "unquantized": ResNet50_Weights.IMAGENET1K_V1,
-            "acc@1": 75.920,
-            "acc@5": 92.814,
+            "metrics": {
+                "acc@1": 75.920,
+                "acc@5": 92.814,
+            },
         },
     )
     IMAGENET1K_FBGEMM_V2 = Weights(
@@ -188,8 +192,10 @@ class ResNet50_QuantizedWeights(WeightsEnum):
             **_COMMON_META,
             "num_params": 25557032,
             "unquantized": ResNet50_Weights.IMAGENET1K_V2,
-            "acc@1": 80.282,
-            "acc@5": 94.976,
+            "metrics": {
+                "acc@1": 80.282,
+                "acc@5": 94.976,
+            },
         },
     )
     DEFAULT = IMAGENET1K_FBGEMM_V2
@@ -203,8 +209,10 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
             **_COMMON_META,
             "num_params": 88791336,
             "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
-            "acc@1": 78.986,
-            "acc@5": 94.480,
+            "metrics": {
+                "acc@1": 78.986,
+                "acc@5": 94.480,
+            },
         },
     )
     IMAGENET1K_FBGEMM_V2 = Weights(
@@ -214,8 +222,10 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
             **_COMMON_META,
             "num_params": 88791336,
             "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V2,
-            "acc@1": 82.574,
-            "acc@5": 96.132,
+            "metrics": {
+                "acc@1": 82.574,
+                "acc@5": 96.132,
+            },
         },
     )
     DEFAULT = IMAGENET1K_FBGEMM_V2
......
@@ -117,8 +117,10 @@ class ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum):
             **_COMMON_META,
             "num_params": 1366792,
             "unquantized": ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1,
-            "acc@1": 57.972,
-            "acc@5": 79.780,
+            "metrics": {
+                "acc@1": 57.972,
+                "acc@5": 79.780,
+            },
         },
     )
     DEFAULT = IMAGENET1K_FBGEMM_V1
@@ -132,8 +134,10 @@ class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum):
             **_COMMON_META,
             "num_params": 2278604,
             "unquantized": ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1,
-            "acc@1": 68.360,
-            "acc@5": 87.582,
+            "metrics": {
+                "acc@1": 68.360,
+                "acc@5": 87.582,
+            },
         },
     )
     DEFAULT = IMAGENET1K_FBGEMM_V1
......
@@ -422,8 +422,10 @@ class RegNet_Y_400MF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 4344144,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
-            "acc@1": 74.046,
-            "acc@5": 91.716,
+            "metrics": {
+                "acc@1": 74.046,
+                "acc@5": 91.716,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -433,8 +435,10 @@ class RegNet_Y_400MF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 4344144,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 75.804,
-            "acc@5": 92.742,
+            "metrics": {
+                "acc@1": 75.804,
+                "acc@5": 92.742,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -448,8 +452,10 @@ class RegNet_Y_800MF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 6432512,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
-            "acc@1": 76.420,
-            "acc@5": 93.136,
+            "metrics": {
+                "acc@1": 76.420,
+                "acc@5": 93.136,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -459,8 +465,10 @@ class RegNet_Y_800MF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 6432512,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 78.828,
-            "acc@5": 94.502,
+            "metrics": {
+                "acc@1": 78.828,
+                "acc@5": 94.502,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -474,8 +482,10 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 11202430,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
-            "acc@1": 77.950,
-            "acc@5": 93.966,
+            "metrics": {
+                "acc@1": 77.950,
+                "acc@5": 93.966,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -485,8 +495,10 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 11202430,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 80.876,
-            "acc@5": 95.444,
+            "metrics": {
+                "acc@1": 80.876,
+                "acc@5": 95.444,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -500,8 +512,10 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 19436338,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
-            "acc@1": 78.948,
-            "acc@5": 94.576,
+            "metrics": {
+                "acc@1": 78.948,
+                "acc@5": 94.576,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -511,8 +525,10 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 19436338,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 81.982,
-            "acc@5": 95.972,
+            "metrics": {
+                "acc@1": 81.982,
+                "acc@5": 95.972,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -526,8 +542,10 @@ class RegNet_Y_8GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 39381472,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
-            "acc@1": 80.032,
-            "acc@5": 95.048,
+            "metrics": {
+                "acc@1": 80.032,
+                "acc@5": 95.048,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -537,8 +555,10 @@ class RegNet_Y_8GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 39381472,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 82.828,
-            "acc@5": 96.330,
+            "metrics": {
+                "acc@1": 82.828,
+                "acc@5": 96.330,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -552,8 +572,10 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 83590140,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
-            "acc@1": 80.424,
-            "acc@5": 95.240,
+            "metrics": {
+                "acc@1": 80.424,
+                "acc@5": 95.240,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -563,8 +585,10 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 83590140,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 82.886,
-            "acc@5": 96.328,
+            "metrics": {
+                "acc@1": 82.886,
+                "acc@5": 96.328,
+            },
         },
     )
     IMAGENET1K_SWAG_E2E_V1 = Weights(
@@ -575,8 +599,10 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
         meta={
             **_COMMON_SWAG_META,
             "num_params": 83590140,
-            "acc@1": 86.012,
-            "acc@5": 98.054,
+            "metrics": {
+                "acc@1": 86.012,
+                "acc@5": 98.054,
+            },
         },
     )
     IMAGENET1K_SWAG_LINEAR_V1 = Weights(
@@ -588,8 +614,10 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
             **_COMMON_SWAG_META,
             "recipe": "https://github.com/pytorch/vision/pull/5793",
             "num_params": 83590140,
-            "acc@1": 83.976,
-            "acc@5": 97.244,
+            "metrics": {
+                "acc@1": 83.976,
+                "acc@5": 97.244,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -603,8 +631,10 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 145046770,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
-            "acc@1": 80.878,
-            "acc@5": 95.340,
+            "metrics": {
+                "acc@1": 80.878,
+                "acc@5": 95.340,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -614,8 +644,10 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 145046770,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 83.368,
-            "acc@5": 96.498,
+            "metrics": {
+                "acc@1": 83.368,
+                "acc@5": 96.498,
+            },
         },
     )
     IMAGENET1K_SWAG_E2E_V1 = Weights(
@@ -626,8 +658,10 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
         meta={
             **_COMMON_SWAG_META,
             "num_params": 145046770,
-            "acc@1": 86.838,
-            "acc@5": 98.362,
+            "metrics": {
+                "acc@1": 86.838,
+                "acc@5": 98.362,
+            },
         },
     )
     IMAGENET1K_SWAG_LINEAR_V1 = Weights(
@@ -639,8 +673,10 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
             **_COMMON_SWAG_META,
             "recipe": "https://github.com/pytorch/vision/pull/5793",
             "num_params": 145046770,
-            "acc@1": 84.622,
-            "acc@5": 97.480,
+            "metrics": {
+                "acc@1": 84.622,
+                "acc@5": 97.480,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -655,8 +691,10 @@ class RegNet_Y_128GF_Weights(WeightsEnum):
         meta={
             **_COMMON_SWAG_META,
             "num_params": 644812894,
-            "acc@1": 88.228,
-            "acc@5": 98.682,
+            "metrics": {
+                "acc@1": 88.228,
+                "acc@5": 98.682,
+            },
         },
     )
     IMAGENET1K_SWAG_LINEAR_V1 = Weights(
@@ -668,8 +706,10 @@ class RegNet_Y_128GF_Weights(WeightsEnum):
             **_COMMON_SWAG_META,
             "recipe": "https://github.com/pytorch/vision/pull/5793",
             "num_params": 644812894,
-            "acc@1": 86.068,
-            "acc@5": 97.844,
+            "metrics": {
+                "acc@1": 86.068,
+                "acc@5": 97.844,
+            },
         },
     )
     DEFAULT = IMAGENET1K_SWAG_E2E_V1
@@ -683,8 +723,10 @@ class RegNet_X_400MF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 5495976,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
-            "acc@1": 72.834,
-            "acc@5": 90.950,
+            "metrics": {
+                "acc@1": 72.834,
+                "acc@5": 90.950,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -694,8 +736,10 @@ class RegNet_X_400MF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 5495976,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
-            "acc@1": 74.864,
-            "acc@5": 92.322,
+            "metrics": {
+                "acc@1": 74.864,
+                "acc@5": 92.322,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -709,8 +753,10 @@ class RegNet_X_800MF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 7259656,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
-            "acc@1": 75.212,
-            "acc@5": 92.348,
+            "metrics": {
+                "acc@1": 75.212,
+                "acc@5": 92.348,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -720,8 +766,10 @@ class RegNet_X_800MF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 7259656,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
-            "acc@1": 77.522,
-            "acc@5": 93.826,
+            "metrics": {
+                "acc@1": 77.522,
+                "acc@5": 93.826,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -735,8 +783,10 @@ class RegNet_X_1_6GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 9190136,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
-            "acc@1": 77.040,
-            "acc@5": 93.440,
+            "metrics": {
+                "acc@1": 77.040,
+                "acc@5": 93.440,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -746,8 +796,10 @@ class RegNet_X_1_6GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 9190136,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
-            "acc@1": 79.668,
-            "acc@5": 94.922,
+            "metrics": {
+                "acc@1": 79.668,
+                "acc@5": 94.922,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -761,8 +813,10 @@ class RegNet_X_3_2GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 15296552,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
-            "acc@1": 78.364,
-            "acc@5": 93.992,
+            "metrics": {
+                "acc@1": 78.364,
+                "acc@5": 93.992,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -772,8 +826,10 @@ class RegNet_X_3_2GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 15296552,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 81.196,
-            "acc@5": 95.430,
+            "metrics": {
+                "acc@1": 81.196,
+                "acc@5": 95.430,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -787,8 +843,10 @@ class RegNet_X_8GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 39572648,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
-            "acc@1": 79.344,
-            "acc@5": 94.686,
+            "metrics": {
+                "acc@1": 79.344,
+                "acc@5": 94.686,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -798,8 +856,10 @@ class RegNet_X_8GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 39572648,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 81.682,
-            "acc@5": 95.678,
+            "metrics": {
+                "acc@1": 81.682,
+                "acc@5": 95.678,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -813,8 +873,10 @@ class RegNet_X_16GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 54278536,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
-            "acc@1": 80.058,
-            "acc@5": 94.944,
+            "metrics": {
+                "acc@1": 80.058,
+                "acc@5": 94.944,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -824,8 +886,10 @@ class RegNet_X_16GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 54278536,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 82.716,
-            "acc@5": 96.196,
+            "metrics": {
+                "acc@1": 82.716,
+                "acc@5": 96.196,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -839,8 +903,10 @@ class RegNet_X_32GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 107811560,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
-            "acc@1": 80.622,
-            "acc@5": 95.248,
+            "metrics": {
+                "acc@1": 80.622,
+                "acc@5": 95.248,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -850,8 +916,10 @@ class RegNet_X_32GF_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 107811560,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 83.014,
-            "acc@5": 96.288,
+            "metrics": {
+                "acc@1": 83.014,
+                "acc@5": 96.288,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
......
@@ -315,8 +315,10 @@ class ResNet18_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 11689512,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
-            "acc@1": 69.758,
-            "acc@5": 89.078,
+            "metrics": {
+                "acc@1": 69.758,
+                "acc@5": 89.078,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -330,8 +332,10 @@ class ResNet34_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 21797672,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
-            "acc@1": 73.314,
-            "acc@5": 91.420,
+            "metrics": {
+                "acc@1": 73.314,
+                "acc@5": 91.420,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -345,8 +349,10 @@ class ResNet50_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 25557032,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
-            "acc@1": 76.130,
-            "acc@5": 92.862,
+            "metrics": {
+                "acc@1": 76.130,
+                "acc@5": 92.862,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -356,8 +362,10 @@ class ResNet50_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 25557032,
             "recipe": "https://github.com/pytorch/vision/issues/3995#issuecomment-1013906621",
-            "acc@1": 80.858,
-            "acc@5": 95.434,
+            "metrics": {
+                "acc@1": 80.858,
+                "acc@5": 95.434,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -371,8 +379,10 @@ class ResNet101_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 44549160,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
-            "acc@1": 77.374,
-            "acc@5": 93.546,
+            "metrics": {
+                "acc@1": 77.374,
+                "acc@5": 93.546,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -382,8 +392,10 @@ class ResNet101_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 44549160,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 81.886,
-            "acc@5": 95.780,
+            "metrics": {
+                "acc@1": 81.886,
+                "acc@5": 95.780,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -397,8 +409,10 @@ class ResNet152_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 60192808,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
-            "acc@1": 78.312,
-            "acc@5": 94.046,
+            "metrics": {
+                "acc@1": 78.312,
+                "acc@5": 94.046,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -408,8 +422,10 @@ class ResNet152_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 60192808,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 82.284,
-            "acc@5": 96.002,
+            "metrics": {
+                "acc@1": 82.284,
+                "acc@5": 96.002,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -423,8 +439,10 @@ class ResNeXt50_32X4D_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 25028904,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
-            "acc@1": 77.618,
-            "acc@5": 93.698,
+            "metrics": {
+                "acc@1": 77.618,
+                "acc@5": 93.698,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -434,8 +452,10 @@ class ResNeXt50_32X4D_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 25028904,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 81.198,
-            "acc@5": 95.340,
+            "metrics": {
+                "acc@1": 81.198,
+                "acc@5": 95.340,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -449,8 +469,10 @@ class ResNeXt101_32X8D_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 88791336,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
-            "acc@1": 79.312,
-            "acc@5": 94.526,
+            "metrics": {
+                "acc@1": 79.312,
+                "acc@5": 94.526,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -460,8 +482,10 @@ class ResNeXt101_32X8D_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 88791336,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
-            "acc@1": 82.834,
-            "acc@5": 96.228,
+            "metrics": {
+                "acc@1": 82.834,
+                "acc@5": 96.228,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -475,8 +499,10 @@ class Wide_ResNet50_2_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 68883240,
             "recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
-            "acc@1": 78.468,
-            "acc@5": 94.086,
+            "metrics": {
+                "acc@1": 78.468,
+                "acc@5": 94.086,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -486,8 +512,10 @@ class Wide_ResNet50_2_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 68883240,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
-            "acc@1": 81.602,
-            "acc@5": 95.758,
+            "metrics": {
+                "acc@1": 81.602,
+                "acc@5": 95.758,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
@@ -501,8 +529,10 @@ class Wide_ResNet101_2_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 126886696,
             "recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
-            "acc@1": 78.848,
-            "acc@5": 94.284,
+            "metrics": {
+                "acc@1": 78.848,
+                "acc@5": 94.284,
+            },
         },
     )
     IMAGENET1K_V2 = Weights(
@@ -512,8 +542,10 @@ class Wide_ResNet101_2_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 126886696,
             "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
-            "acc@1": 82.510,
-            "acc@5": 96.020,
+            "metrics": {
+                "acc@1": 82.510,
+                "acc@5": 96.020,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V2
......
@@ -142,8 +142,10 @@ class DeepLabV3_ResNet50_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 42004074,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet50",
-            "mIoU": 66.4,
-            "acc": 92.4,
+            "metrics": {
+                "miou": 66.4,
+                "pixel_acc": 92.4,
+            },
         },
     )
     DEFAULT = COCO_WITH_VOC_LABELS_V1
@@ -157,8 +159,10 @@ class DeepLabV3_ResNet101_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 60996202,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#fcn_resnet101",
-            "mIoU": 67.4,
-            "acc": 92.4,
+            "metrics": {
+                "miou": 67.4,
+                "pixel_acc": 92.4,
+            },
         },
     )
     DEFAULT = COCO_WITH_VOC_LABELS_V1
@@ -172,8 +176,10 @@ class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 11029328,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_mobilenet_v3_large",
-            "mIoU": 60.3,
-            "acc": 91.2,
+            "metrics": {
+                "miou": 60.3,
+                "pixel_acc": 91.2,
+            },
         },
     )
     DEFAULT = COCO_WITH_VOC_LABELS_V1
......
@@ -61,8 +61,10 @@ class FCN_ResNet50_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 35322218,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#fcn_resnet50",
-            "mIoU": 60.5,
-            "acc": 91.4,
+            "metrics": {
+                "miou": 60.5,
+                "pixel_acc": 91.4,
+            },
         },
     )
     DEFAULT = COCO_WITH_VOC_LABELS_V1
@@ -76,8 +78,10 @@ class FCN_ResNet101_Weights(WeightsEnum):
             **_COMMON_META,
             "num_params": 54314346,
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet101",
-            "mIoU": 63.7,
-            "acc": 91.9,
+            "metrics": {
+                "miou": 63.7,
+                "pixel_acc": 91.9,
+            },
         },
     )
     DEFAULT = COCO_WITH_VOC_LABELS_V1
......
@@ -102,8 +102,10 @@ class LRASPP_MobileNet_V3_Large_Weights(WeightsEnum):
             "categories": _VOC_CATEGORIES,
             "min_size": (1, 1),
             "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#lraspp_mobilenet_v3_large",
-            "mIoU": 57.9,
-            "acc": 91.2,
+            "metrics": {
+                "miou": 57.9,
+                "pixel_acc": 91.2,
+            },
         },
     )
     DEFAULT = COCO_WITH_VOC_LABELS_V1
......
@@ -197,8 +197,10 @@ class ShuffleNet_V2_X0_5_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 1366792,
-            "acc@1": 69.362,
-            "acc@5": 88.316,
+            "metrics": {
+                "acc@1": 69.362,
+                "acc@5": 88.316,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -211,8 +213,10 @@ class ShuffleNet_V2_X1_0_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 2278604,
-            "acc@1": 60.552,
-            "acc@5": 81.746,
+            "metrics": {
+                "acc@1": 60.552,
+                "acc@5": 81.746,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
......
@@ -128,8 +128,10 @@ class SqueezeNet1_0_Weights(WeightsEnum):
             **_COMMON_META,
             "min_size": (21, 21),
             "num_params": 1248424,
-            "acc@1": 58.092,
-            "acc@5": 80.420,
+            "metrics": {
+                "acc@1": 58.092,
+                "acc@5": 80.420,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -143,8 +145,10 @@ class SqueezeNet1_1_Weights(WeightsEnum):
             **_COMMON_META,
             "min_size": (17, 17),
             "num_params": 1235496,
-            "acc@1": 58.178,
-            "acc@5": 80.624,
+            "metrics": {
+                "acc@1": 58.178,
+                "acc@5": 80.624,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
......
@@ -120,8 +120,10 @@ class VGG11_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 132863336,
-            "acc@1": 69.020,
-            "acc@5": 88.628,
+            "metrics": {
+                "acc@1": 69.020,
+                "acc@5": 88.628,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -134,8 +136,10 @@ class VGG11_BN_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 132868840,
-            "acc@1": 70.370,
-            "acc@5": 89.810,
+            "metrics": {
+                "acc@1": 70.370,
+                "acc@5": 89.810,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -148,8 +152,10 @@ class VGG13_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 133047848,
-            "acc@1": 69.928,
-            "acc@5": 89.246,
+            "metrics": {
+                "acc@1": 69.928,
+                "acc@5": 89.246,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -162,8 +168,10 @@ class VGG13_BN_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 133053736,
-            "acc@1": 71.586,
-            "acc@5": 90.374,
+            "metrics": {
+                "acc@1": 71.586,
+                "acc@5": 90.374,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -176,8 +184,10 @@ class VGG16_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 138357544,
-            "acc@1": 71.592,
-            "acc@5": 90.382,
+            "metrics": {
+                "acc@1": 71.592,
+                "acc@5": 90.382,
+            },
         },
     )
     # We port the features of a VGG16 backbone trained by amdegroot because unlike the one on TorchVision, it uses the
@@ -196,8 +206,10 @@ class VGG16_Weights(WeightsEnum):
             "num_params": 138357544,
             "categories": None,
             "recipe": "https://github.com/amdegroot/ssd.pytorch#training-ssd",
-            "acc@1": float("nan"),
-            "acc@5": float("nan"),
+            "metrics": {
+                "acc@1": float("nan"),
+                "acc@5": float("nan"),
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -210,8 +222,10 @@ class VGG16_BN_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 138365992,
-            "acc@1": 73.360,
-            "acc@5": 91.516,
+            "metrics": {
+                "acc@1": 73.360,
+                "acc@5": 91.516,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -224,8 +238,10 @@ class VGG19_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 143667240,
-            "acc@1": 72.376,
-            "acc@5": 90.876,
+            "metrics": {
+                "acc@1": 72.376,
+                "acc@5": 90.876,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -238,8 +254,10 @@ class VGG19_BN_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 143678248,
-            "acc@1": 74.218,
-            "acc@5": 91.842,
+            "metrics": {
+                "acc@1": 74.218,
+                "acc@5": 91.842,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
......
@@ -322,8 +322,10 @@ class R3D_18_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 33371472,
-            "acc@1": 52.75,
-            "acc@5": 75.45,
+            "metrics": {
+                "acc@1": 52.75,
+                "acc@5": 75.45,
+            },
         },
     )
     DEFAULT = KINETICS400_V1
@@ -336,8 +338,10 @@ class MC3_18_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 11695440,
-            "acc@1": 53.90,
-            "acc@5": 76.29,
+            "metrics": {
+                "acc@1": 53.90,
+                "acc@5": 76.29,
+            },
         },
     )
     DEFAULT = KINETICS400_V1
@@ -350,8 +354,10 @@ class R2Plus1D_18_Weights(WeightsEnum):
         meta={
             **_COMMON_META,
             "num_params": 31505325,
-            "acc@1": 57.50,
-            "acc@5": 78.81,
+            "metrics": {
+                "acc@1": 57.50,
+                "acc@5": 78.81,
+            },
         },
     )
     DEFAULT = KINETICS400_V1
......
@@ -328,8 +328,10 @@ class ViT_B_16_Weights(WeightsEnum):
             "num_params": 86567656,
             "min_size": (224, 224),
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_b_16",
-            "acc@1": 81.072,
-            "acc@5": 95.318,
+            "metrics": {
+                "acc@1": 81.072,
+                "acc@5": 95.318,
+            },
         },
     )
     IMAGENET1K_SWAG_E2E_V1 = Weights(
@@ -344,8 +346,10 @@ class ViT_B_16_Weights(WeightsEnum):
             **_COMMON_SWAG_META,
             "num_params": 86859496,
             "min_size": (384, 384),
-            "acc@1": 85.304,
-            "acc@5": 97.650,
+            "metrics": {
+                "acc@1": 85.304,
+                "acc@5": 97.650,
+            },
         },
     )
     IMAGENET1K_SWAG_LINEAR_V1 = Weights(
@@ -361,8 +365,10 @@ class ViT_B_16_Weights(WeightsEnum):
             "recipe": "https://github.com/pytorch/vision/pull/5793",
             "num_params": 86567656,
             "min_size": (224, 224),
-            "acc@1": 81.886,
-            "acc@5": 96.180,
+            "metrics": {
+                "acc@1": 81.886,
+                "acc@5": 96.180,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -377,8 +383,10 @@ class ViT_B_32_Weights(WeightsEnum):
             "num_params": 88224232,
             "min_size": (224, 224),
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_b_32",
-            "acc@1": 75.912,
-            "acc@5": 92.466,
+            "metrics": {
+                "acc@1": 75.912,
+                "acc@5": 92.466,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -393,8 +401,10 @@ class ViT_L_16_Weights(WeightsEnum):
             "num_params": 304326632,
             "min_size": (224, 224),
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_l_16",
-            "acc@1": 79.662,
-            "acc@5": 94.638,
+            "metrics": {
+                "acc@1": 79.662,
+                "acc@5": 94.638,
+            },
         },
     )
     IMAGENET1K_SWAG_E2E_V1 = Weights(
@@ -409,8 +419,10 @@ class ViT_L_16_Weights(WeightsEnum):
             **_COMMON_SWAG_META,
             "num_params": 305174504,
             "min_size": (512, 512),
-            "acc@1": 88.064,
-            "acc@5": 98.512,
+            "metrics": {
+                "acc@1": 88.064,
+                "acc@5": 98.512,
+            },
         },
     )
     IMAGENET1K_SWAG_LINEAR_V1 = Weights(
@@ -426,8 +438,10 @@ class ViT_L_16_Weights(WeightsEnum):
             "recipe": "https://github.com/pytorch/vision/pull/5793",
             "num_params": 304326632,
             "min_size": (224, 224),
-            "acc@1": 85.146,
-            "acc@5": 97.422,
+            "metrics": {
+                "acc@1": 85.146,
+                "acc@5": 97.422,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -442,8 +456,10 @@ class ViT_L_32_Weights(WeightsEnum):
             "num_params": 306535400,
             "min_size": (224, 224),
             "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_l_32",
-            "acc@1": 76.972,
-            "acc@5": 93.07,
+            "metrics": {
+                "acc@1": 76.972,
+                "acc@5": 93.07,
+            },
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -462,8 +478,10 @@ class ViT_H_14_Weights(WeightsEnum):
             **_COMMON_SWAG_META,
             "num_params": 633470440,
             "min_size": (518, 518),
-            "acc@1": 88.552,
-            "acc@5": 98.694,
+            "metrics": {
+                "acc@1": 88.552,
+                "acc@5": 98.694,
+            },
         },
     )
     IMAGENET1K_SWAG_LINEAR_V1 = Weights(
@@ -479,8 +497,10 @@ class ViT_H_14_Weights(WeightsEnum):
             "recipe": "https://github.com/pytorch/vision/pull/5793",
             "num_params": 632045800,
             "min_size": (224, 224),
-            "acc@1": 85.708,
-            "acc@5": 97.730,
+            "metrics": {
+                "acc@1": 85.708,
+                "acc@5": 97.730,
+            },
         },
     )
     DEFAULT = IMAGENET1K_SWAG_E2E_V1
......
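
A sketch of how downstream code reads the restructured metadata. The import path is an assumption (at the time of this PR the multi-weight API lived in the prototype namespace and has since moved); the nesting itself matches the hunks above:

# Hypothetical usage; module path assumed, values taken from the ResNet50 hunk above.
from torchvision.prototype.models import ResNet50_Weights

weights = ResNet50_Weights.IMAGENET1K_V2
# Top-level lookups such as weights.meta["acc@1"] no longer exist;
# metrics are grouped under a single "metrics" key instead:
for name, value in weights.meta["metrics"].items():
    print(name, value)  # acc@1 80.858, then acc@5 95.434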