Unverified commit 6f016dd9, authored by Vasilis Vryniotis, committed by GitHub

Restructuring metrics meta-data (#5859)

* Restructuring metrics meta-data for detection, segmentation and optical flow.

* Renaming acc to pixel_acc for segmentation

* Restructure video meta-data.

* Restructure classification and quantization meta-data.

* Fix tests.

* Fix documentation
parent c82b86d1
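For downstream code, this change moves the accuracy numbers one level deeper in each weight entry's `meta` dictionary: measured results now live under a dedicated `"metrics"` key, separate from descriptive fields such as `"num_params"`, `"recipe"` or `"categories"`. A minimal sketch of the before/after access pattern, assuming the multi-weight API exposes each enum member's metadata via `.meta` (the exact import path depends on the torchvision version in use):

```python
# Illustrative only: metric access before and after this PR.
# Assumes the new multi-weight API; adjust the import to your torchvision version.
from torchvision.models import ResNet50_Weights

weights = ResNet50_Weights.IMAGENET1K_V2

# Before #5859: metrics were stored flat alongside the rest of the meta-data.
# acc1 = weights.meta["acc@1"]             # 80.858
# acc5 = weights.meta["acc@5"]             # 95.434

# After #5859: metrics are nested under a dedicated "metrics" key.
acc1 = weights.meta["metrics"]["acc@1"]    # 80.858
acc5 = weights.meta["metrics"]["acc@5"]    # 95.434
print(f"ResNet50 IMAGENET1K_V2: acc@1={acc1}, acc@5={acc5}")
```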
......@@ -183,8 +183,10 @@ class Inception_V3_QuantizedWeights(WeightsEnum):
"backend": "fbgemm",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
"unquantized": Inception_V3_Weights.IMAGENET1K_V1,
"acc@1": 77.176,
"acc@5": 93.354,
"metrics": {
"acc@1": 77.176,
"acc@5": 93.354,
},
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
......
......@@ -75,8 +75,10 @@ class MobileNet_V2_QuantizedWeights(WeightsEnum):
"backend": "qnnpack",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv2",
"unquantized": MobileNet_V2_Weights.IMAGENET1K_V1,
"acc@1": 71.658,
"acc@5": 90.150,
"metrics": {
"acc@1": 71.658,
"acc@5": 90.150,
},
},
)
DEFAULT = IMAGENET1K_QNNPACK_V1
......
......@@ -165,8 +165,10 @@ class MobileNet_V3_Large_QuantizedWeights(WeightsEnum):
"backend": "qnnpack",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv3",
"unquantized": MobileNet_V3_Large_Weights.IMAGENET1K_V1,
"acc@1": 73.004,
"acc@5": 90.858,
"metrics": {
"acc@1": 73.004,
"acc@5": 90.858,
},
},
)
DEFAULT = IMAGENET1K_QNNPACK_V1
......
......@@ -162,8 +162,10 @@ class ResNet18_QuantizedWeights(WeightsEnum):
**_COMMON_META,
"num_params": 11689512,
"unquantized": ResNet18_Weights.IMAGENET1K_V1,
"acc@1": 69.494,
"acc@5": 88.882,
"metrics": {
"acc@1": 69.494,
"acc@5": 88.882,
},
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
......@@ -177,8 +179,10 @@ class ResNet50_QuantizedWeights(WeightsEnum):
**_COMMON_META,
"num_params": 25557032,
"unquantized": ResNet50_Weights.IMAGENET1K_V1,
"acc@1": 75.920,
"acc@5": 92.814,
"metrics": {
"acc@1": 75.920,
"acc@5": 92.814,
},
},
)
IMAGENET1K_FBGEMM_V2 = Weights(
......@@ -188,8 +192,10 @@ class ResNet50_QuantizedWeights(WeightsEnum):
**_COMMON_META,
"num_params": 25557032,
"unquantized": ResNet50_Weights.IMAGENET1K_V2,
"acc@1": 80.282,
"acc@5": 94.976,
"metrics": {
"acc@1": 80.282,
"acc@5": 94.976,
},
},
)
DEFAULT = IMAGENET1K_FBGEMM_V2
......@@ -203,8 +209,10 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
**_COMMON_META,
"num_params": 88791336,
"unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
"acc@1": 78.986,
"acc@5": 94.480,
"metrics": {
"acc@1": 78.986,
"acc@5": 94.480,
},
},
)
IMAGENET1K_FBGEMM_V2 = Weights(
......@@ -214,8 +222,10 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
**_COMMON_META,
"num_params": 88791336,
"unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V2,
"acc@1": 82.574,
"acc@5": 96.132,
"metrics": {
"acc@1": 82.574,
"acc@5": 96.132,
},
},
)
DEFAULT = IMAGENET1K_FBGEMM_V2
......
......@@ -117,8 +117,10 @@ class ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum):
**_COMMON_META,
"num_params": 1366792,
"unquantized": ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1,
"acc@1": 57.972,
"acc@5": 79.780,
"metrics": {
"acc@1": 57.972,
"acc@5": 79.780,
},
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
......@@ -132,8 +134,10 @@ class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum):
**_COMMON_META,
"num_params": 2278604,
"unquantized": ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1,
"acc@1": 68.360,
"acc@5": 87.582,
"metrics": {
"acc@1": 68.360,
"acc@5": 87.582,
},
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
......
......@@ -422,8 +422,10 @@ class RegNet_Y_400MF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 4344144,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"acc@1": 74.046,
"acc@5": 91.716,
"metrics": {
"acc@1": 74.046,
"acc@5": 91.716,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -433,8 +435,10 @@ class RegNet_Y_400MF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 4344144,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 75.804,
"acc@5": 92.742,
"metrics": {
"acc@1": 75.804,
"acc@5": 92.742,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -448,8 +452,10 @@ class RegNet_Y_800MF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 6432512,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"acc@1": 76.420,
"acc@5": 93.136,
"metrics": {
"acc@1": 76.420,
"acc@5": 93.136,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -459,8 +465,10 @@ class RegNet_Y_800MF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 6432512,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 78.828,
"acc@5": 94.502,
"metrics": {
"acc@1": 78.828,
"acc@5": 94.502,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -474,8 +482,10 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 11202430,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"acc@1": 77.950,
"acc@5": 93.966,
"metrics": {
"acc@1": 77.950,
"acc@5": 93.966,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -485,8 +495,10 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 11202430,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 80.876,
"acc@5": 95.444,
"metrics": {
"acc@1": 80.876,
"acc@5": 95.444,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -500,8 +512,10 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 19436338,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"acc@1": 78.948,
"acc@5": 94.576,
"metrics": {
"acc@1": 78.948,
"acc@5": 94.576,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -511,8 +525,10 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 19436338,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 81.982,
"acc@5": 95.972,
"metrics": {
"acc@1": 81.982,
"acc@5": 95.972,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -526,8 +542,10 @@ class RegNet_Y_8GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 39381472,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"acc@1": 80.032,
"acc@5": 95.048,
"metrics": {
"acc@1": 80.032,
"acc@5": 95.048,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -537,8 +555,10 @@ class RegNet_Y_8GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 39381472,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 82.828,
"acc@5": 96.330,
"metrics": {
"acc@1": 82.828,
"acc@5": 96.330,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -552,8 +572,10 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 83590140,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
"acc@1": 80.424,
"acc@5": 95.240,
"metrics": {
"acc@1": 80.424,
"acc@5": 95.240,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -563,8 +585,10 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 83590140,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 82.886,
"acc@5": 96.328,
"metrics": {
"acc@1": 82.886,
"acc@5": 96.328,
},
},
)
IMAGENET1K_SWAG_E2E_V1 = Weights(
......@@ -575,8 +599,10 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
meta={
**_COMMON_SWAG_META,
"num_params": 83590140,
"acc@1": 86.012,
"acc@5": 98.054,
"metrics": {
"acc@1": 86.012,
"acc@5": 98.054,
},
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
......@@ -588,8 +614,10 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 83590140,
"acc@1": 83.976,
"acc@5": 97.244,
"metrics": {
"acc@1": 83.976,
"acc@5": 97.244,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -603,8 +631,10 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 145046770,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
"acc@1": 80.878,
"acc@5": 95.340,
"metrics": {
"acc@1": 80.878,
"acc@5": 95.340,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -614,8 +644,10 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 145046770,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 83.368,
"acc@5": 96.498,
"metrics": {
"acc@1": 83.368,
"acc@5": 96.498,
},
},
)
IMAGENET1K_SWAG_E2E_V1 = Weights(
......@@ -626,8 +658,10 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
meta={
**_COMMON_SWAG_META,
"num_params": 145046770,
"acc@1": 86.838,
"acc@5": 98.362,
"metrics": {
"acc@1": 86.838,
"acc@5": 98.362,
},
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
......@@ -639,8 +673,10 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 145046770,
"acc@1": 84.622,
"acc@5": 97.480,
"metrics": {
"acc@1": 84.622,
"acc@5": 97.480,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -655,8 +691,10 @@ class RegNet_Y_128GF_Weights(WeightsEnum):
meta={
**_COMMON_SWAG_META,
"num_params": 644812894,
"acc@1": 88.228,
"acc@5": 98.682,
"metrics": {
"acc@1": 88.228,
"acc@5": 98.682,
},
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
......@@ -668,8 +706,10 @@ class RegNet_Y_128GF_Weights(WeightsEnum):
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 644812894,
"acc@1": 86.068,
"acc@5": 97.844,
"metrics": {
"acc@1": 86.068,
"acc@5": 97.844,
},
},
)
DEFAULT = IMAGENET1K_SWAG_E2E_V1
......@@ -683,8 +723,10 @@ class RegNet_X_400MF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 5495976,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"acc@1": 72.834,
"acc@5": 90.950,
"metrics": {
"acc@1": 72.834,
"acc@5": 90.950,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -694,8 +736,10 @@ class RegNet_X_400MF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 5495976,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"acc@1": 74.864,
"acc@5": 92.322,
"metrics": {
"acc@1": 74.864,
"acc@5": 92.322,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -709,8 +753,10 @@ class RegNet_X_800MF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 7259656,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"acc@1": 75.212,
"acc@5": 92.348,
"metrics": {
"acc@1": 75.212,
"acc@5": 92.348,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -720,8 +766,10 @@ class RegNet_X_800MF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 7259656,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"acc@1": 77.522,
"acc@5": 93.826,
"metrics": {
"acc@1": 77.522,
"acc@5": 93.826,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -735,8 +783,10 @@ class RegNet_X_1_6GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 9190136,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"acc@1": 77.040,
"acc@5": 93.440,
"metrics": {
"acc@1": 77.040,
"acc@5": 93.440,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -746,8 +796,10 @@ class RegNet_X_1_6GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 9190136,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"acc@1": 79.668,
"acc@5": 94.922,
"metrics": {
"acc@1": 79.668,
"acc@5": 94.922,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -761,8 +813,10 @@ class RegNet_X_3_2GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 15296552,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"acc@1": 78.364,
"acc@5": 93.992,
"metrics": {
"acc@1": 78.364,
"acc@5": 93.992,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -772,8 +826,10 @@ class RegNet_X_3_2GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 15296552,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 81.196,
"acc@5": 95.430,
"metrics": {
"acc@1": 81.196,
"acc@5": 95.430,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -787,8 +843,10 @@ class RegNet_X_8GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 39572648,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"acc@1": 79.344,
"acc@5": 94.686,
"metrics": {
"acc@1": 79.344,
"acc@5": 94.686,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -798,8 +856,10 @@ class RegNet_X_8GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 39572648,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 81.682,
"acc@5": 95.678,
"metrics": {
"acc@1": 81.682,
"acc@5": 95.678,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -813,8 +873,10 @@ class RegNet_X_16GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 54278536,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"acc@1": 80.058,
"acc@5": 94.944,
"metrics": {
"acc@1": 80.058,
"acc@5": 94.944,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -824,8 +886,10 @@ class RegNet_X_16GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 54278536,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 82.716,
"acc@5": 96.196,
"metrics": {
"acc@1": 82.716,
"acc@5": 96.196,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -839,8 +903,10 @@ class RegNet_X_32GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 107811560,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
"acc@1": 80.622,
"acc@5": 95.248,
"metrics": {
"acc@1": 80.622,
"acc@5": 95.248,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -850,8 +916,10 @@ class RegNet_X_32GF_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 107811560,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 83.014,
"acc@5": 96.288,
"metrics": {
"acc@1": 83.014,
"acc@5": 96.288,
},
},
)
DEFAULT = IMAGENET1K_V2
......
......@@ -315,8 +315,10 @@ class ResNet18_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 11689512,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
"acc@1": 69.758,
"acc@5": 89.078,
"metrics": {
"acc@1": 69.758,
"acc@5": 89.078,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -330,8 +332,10 @@ class ResNet34_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 21797672,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
"acc@1": 73.314,
"acc@5": 91.420,
"metrics": {
"acc@1": 73.314,
"acc@5": 91.420,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -345,8 +349,10 @@ class ResNet50_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 25557032,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
"acc@1": 76.130,
"acc@5": 92.862,
"metrics": {
"acc@1": 76.130,
"acc@5": 92.862,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -356,8 +362,10 @@ class ResNet50_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 25557032,
"recipe": "https://github.com/pytorch/vision/issues/3995#issuecomment-1013906621",
"acc@1": 80.858,
"acc@5": 95.434,
"metrics": {
"acc@1": 80.858,
"acc@5": 95.434,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -371,8 +379,10 @@ class ResNet101_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 44549160,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
"acc@1": 77.374,
"acc@5": 93.546,
"metrics": {
"acc@1": 77.374,
"acc@5": 93.546,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -382,8 +392,10 @@ class ResNet101_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 44549160,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 81.886,
"acc@5": 95.780,
"metrics": {
"acc@1": 81.886,
"acc@5": 95.780,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -397,8 +409,10 @@ class ResNet152_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 60192808,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
"acc@1": 78.312,
"acc@5": 94.046,
"metrics": {
"acc@1": 78.312,
"acc@5": 94.046,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -408,8 +422,10 @@ class ResNet152_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 60192808,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 82.284,
"acc@5": 96.002,
"metrics": {
"acc@1": 82.284,
"acc@5": 96.002,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -423,8 +439,10 @@ class ResNeXt50_32X4D_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 25028904,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
"acc@1": 77.618,
"acc@5": 93.698,
"metrics": {
"acc@1": 77.618,
"acc@5": 93.698,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -434,8 +452,10 @@ class ResNeXt50_32X4D_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 25028904,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 81.198,
"acc@5": 95.340,
"metrics": {
"acc@1": 81.198,
"acc@5": 95.340,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -449,8 +469,10 @@ class ResNeXt101_32X8D_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 88791336,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
"acc@1": 79.312,
"acc@5": 94.526,
"metrics": {
"acc@1": 79.312,
"acc@5": 94.526,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -460,8 +482,10 @@ class ResNeXt101_32X8D_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 88791336,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"acc@1": 82.834,
"acc@5": 96.228,
"metrics": {
"acc@1": 82.834,
"acc@5": 96.228,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -475,8 +499,10 @@ class Wide_ResNet50_2_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 68883240,
"recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
"acc@1": 78.468,
"acc@5": 94.086,
"metrics": {
"acc@1": 78.468,
"acc@5": 94.086,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -486,8 +512,10 @@ class Wide_ResNet50_2_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 68883240,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"acc@1": 81.602,
"acc@5": 95.758,
"metrics": {
"acc@1": 81.602,
"acc@5": 95.758,
},
},
)
DEFAULT = IMAGENET1K_V2
......@@ -501,8 +529,10 @@ class Wide_ResNet101_2_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 126886696,
"recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
"acc@1": 78.848,
"acc@5": 94.284,
"metrics": {
"acc@1": 78.848,
"acc@5": 94.284,
},
},
)
IMAGENET1K_V2 = Weights(
......@@ -512,8 +542,10 @@ class Wide_ResNet101_2_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 126886696,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"acc@1": 82.510,
"acc@5": 96.020,
"metrics": {
"acc@1": 82.510,
"acc@5": 96.020,
},
},
)
DEFAULT = IMAGENET1K_V2
......
......@@ -142,8 +142,10 @@ class DeepLabV3_ResNet50_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 42004074,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet50",
"mIoU": 66.4,
"acc": 92.4,
"metrics": {
"miou": 66.4,
"pixel_acc": 92.4,
},
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
......@@ -157,8 +159,10 @@ class DeepLabV3_ResNet101_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 60996202,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#fcn_resnet101",
"mIoU": 67.4,
"acc": 92.4,
"metrics": {
"miou": 67.4,
"pixel_acc": 92.4,
},
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
......@@ -172,8 +176,10 @@ class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 11029328,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_mobilenet_v3_large",
"mIoU": 60.3,
"acc": 91.2,
"metrics": {
"miou": 60.3,
"pixel_acc": 91.2,
},
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
......
......@@ -61,8 +61,10 @@ class FCN_ResNet50_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 35322218,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#fcn_resnet50",
"mIoU": 60.5,
"acc": 91.4,
"metrics": {
"miou": 60.5,
"pixel_acc": 91.4,
},
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
......@@ -76,8 +78,10 @@ class FCN_ResNet101_Weights(WeightsEnum):
**_COMMON_META,
"num_params": 54314346,
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet101",
"mIoU": 63.7,
"acc": 91.9,
"metrics": {
"miou": 63.7,
"pixel_acc": 91.9,
},
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
......
......@@ -102,8 +102,10 @@ class LRASPP_MobileNet_V3_Large_Weights(WeightsEnum):
"categories": _VOC_CATEGORIES,
"min_size": (1, 1),
"recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#lraspp_mobilenet_v3_large",
"mIoU": 57.9,
"acc": 91.2,
"metrics": {
"miou": 57.9,
"pixel_acc": 91.2,
},
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
......
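The segmentation hunks above also rename the metric keys themselves: `"mIoU"` becomes lower-cased `"miou"` and the ambiguous `"acc"` becomes the more explicit `"pixel_acc"`. A minimal sketch of reading the renamed keys, again assuming the multi-weight API and with the import path subject to the torchvision version in use:

```python
# Illustrative only: segmentation metric keys after the rename in this PR.
from torchvision.models.segmentation import FCN_ResNet50_Weights

meta = FCN_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1.meta

# Before: meta["mIoU"] and meta["acc"] at the top level of the meta dict.
# After: "miou" and "pixel_acc", nested under the "metrics" key.
metrics = meta["metrics"]
print(f"mean IoU: {metrics['miou']}, pixel accuracy: {metrics['pixel_acc']}")  # 60.5, 91.4
```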
......@@ -197,8 +197,10 @@ class ShuffleNet_V2_X0_5_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 1366792,
"acc@1": 69.362,
"acc@5": 88.316,
"metrics": {
"acc@1": 69.362,
"acc@5": 88.316,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -211,8 +213,10 @@ class ShuffleNet_V2_X1_0_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 2278604,
"acc@1": 60.552,
"acc@5": 81.746,
"metrics": {
"acc@1": 60.552,
"acc@5": 81.746,
},
},
)
DEFAULT = IMAGENET1K_V1
......
......@@ -128,8 +128,10 @@ class SqueezeNet1_0_Weights(WeightsEnum):
**_COMMON_META,
"min_size": (21, 21),
"num_params": 1248424,
"acc@1": 58.092,
"acc@5": 80.420,
"metrics": {
"acc@1": 58.092,
"acc@5": 80.420,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -143,8 +145,10 @@ class SqueezeNet1_1_Weights(WeightsEnum):
**_COMMON_META,
"min_size": (17, 17),
"num_params": 1235496,
"acc@1": 58.178,
"acc@5": 80.624,
"metrics": {
"acc@1": 58.178,
"acc@5": 80.624,
},
},
)
DEFAULT = IMAGENET1K_V1
......
......@@ -120,8 +120,10 @@ class VGG11_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 132863336,
"acc@1": 69.020,
"acc@5": 88.628,
"metrics": {
"acc@1": 69.020,
"acc@5": 88.628,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -134,8 +136,10 @@ class VGG11_BN_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 132868840,
"acc@1": 70.370,
"acc@5": 89.810,
"metrics": {
"acc@1": 70.370,
"acc@5": 89.810,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -148,8 +152,10 @@ class VGG13_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 133047848,
"acc@1": 69.928,
"acc@5": 89.246,
"metrics": {
"acc@1": 69.928,
"acc@5": 89.246,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -162,8 +168,10 @@ class VGG13_BN_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 133053736,
"acc@1": 71.586,
"acc@5": 90.374,
"metrics": {
"acc@1": 71.586,
"acc@5": 90.374,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -176,8 +184,10 @@ class VGG16_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 138357544,
"acc@1": 71.592,
"acc@5": 90.382,
"metrics": {
"acc@1": 71.592,
"acc@5": 90.382,
},
},
)
# We port the features of a VGG16 backbone trained by amdegroot because unlike the one on TorchVision, it uses the
......@@ -196,8 +206,10 @@ class VGG16_Weights(WeightsEnum):
"num_params": 138357544,
"categories": None,
"recipe": "https://github.com/amdegroot/ssd.pytorch#training-ssd",
"acc@1": float("nan"),
"acc@5": float("nan"),
"metrics": {
"acc@1": float("nan"),
"acc@5": float("nan"),
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -210,8 +222,10 @@ class VGG16_BN_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 138365992,
"acc@1": 73.360,
"acc@5": 91.516,
"metrics": {
"acc@1": 73.360,
"acc@5": 91.516,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -224,8 +238,10 @@ class VGG19_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 143667240,
"acc@1": 72.376,
"acc@5": 90.876,
"metrics": {
"acc@1": 72.376,
"acc@5": 90.876,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -238,8 +254,10 @@ class VGG19_BN_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 143678248,
"acc@1": 74.218,
"acc@5": 91.842,
"metrics": {
"acc@1": 74.218,
"acc@5": 91.842,
},
},
)
DEFAULT = IMAGENET1K_V1
......
......@@ -322,8 +322,10 @@ class R3D_18_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 33371472,
"acc@1": 52.75,
"acc@5": 75.45,
"metrics": {
"acc@1": 52.75,
"acc@5": 75.45,
},
},
)
DEFAULT = KINETICS400_V1
......@@ -336,8 +338,10 @@ class MC3_18_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 11695440,
"acc@1": 53.90,
"acc@5": 76.29,
"metrics": {
"acc@1": 53.90,
"acc@5": 76.29,
},
},
)
DEFAULT = KINETICS400_V1
......@@ -350,8 +354,10 @@ class R2Plus1D_18_Weights(WeightsEnum):
meta={
**_COMMON_META,
"num_params": 31505325,
"acc@1": 57.50,
"acc@5": 78.81,
"metrics": {
"acc@1": 57.50,
"acc@5": 78.81,
},
},
)
DEFAULT = KINETICS400_V1
......
......@@ -328,8 +328,10 @@ class ViT_B_16_Weights(WeightsEnum):
"num_params": 86567656,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_b_16",
"acc@1": 81.072,
"acc@5": 95.318,
"metrics": {
"acc@1": 81.072,
"acc@5": 95.318,
},
},
)
IMAGENET1K_SWAG_E2E_V1 = Weights(
......@@ -344,8 +346,10 @@ class ViT_B_16_Weights(WeightsEnum):
**_COMMON_SWAG_META,
"num_params": 86859496,
"min_size": (384, 384),
"acc@1": 85.304,
"acc@5": 97.650,
"metrics": {
"acc@1": 85.304,
"acc@5": 97.650,
},
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
......@@ -361,8 +365,10 @@ class ViT_B_16_Weights(WeightsEnum):
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 86567656,
"min_size": (224, 224),
"acc@1": 81.886,
"acc@5": 96.180,
"metrics": {
"acc@1": 81.886,
"acc@5": 96.180,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -377,8 +383,10 @@ class ViT_B_32_Weights(WeightsEnum):
"num_params": 88224232,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_b_32",
"acc@1": 75.912,
"acc@5": 92.466,
"metrics": {
"acc@1": 75.912,
"acc@5": 92.466,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -393,8 +401,10 @@ class ViT_L_16_Weights(WeightsEnum):
"num_params": 304326632,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_l_16",
"acc@1": 79.662,
"acc@5": 94.638,
"metrics": {
"acc@1": 79.662,
"acc@5": 94.638,
},
},
)
IMAGENET1K_SWAG_E2E_V1 = Weights(
......@@ -409,8 +419,10 @@ class ViT_L_16_Weights(WeightsEnum):
**_COMMON_SWAG_META,
"num_params": 305174504,
"min_size": (512, 512),
"acc@1": 88.064,
"acc@5": 98.512,
"metrics": {
"acc@1": 88.064,
"acc@5": 98.512,
},
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
......@@ -426,8 +438,10 @@ class ViT_L_16_Weights(WeightsEnum):
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 304326632,
"min_size": (224, 224),
"acc@1": 85.146,
"acc@5": 97.422,
"metrics": {
"acc@1": 85.146,
"acc@5": 97.422,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -442,8 +456,10 @@ class ViT_L_32_Weights(WeightsEnum):
"num_params": 306535400,
"min_size": (224, 224),
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_l_32",
"acc@1": 76.972,
"acc@5": 93.07,
"metrics": {
"acc@1": 76.972,
"acc@5": 93.07,
},
},
)
DEFAULT = IMAGENET1K_V1
......@@ -462,8 +478,10 @@ class ViT_H_14_Weights(WeightsEnum):
**_COMMON_SWAG_META,
"num_params": 633470440,
"min_size": (518, 518),
"acc@1": 88.552,
"acc@5": 98.694,
"metrics": {
"acc@1": 88.552,
"acc@5": 98.694,
},
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
......@@ -479,8 +497,10 @@ class ViT_H_14_Weights(WeightsEnum):
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 632045800,
"min_size": (224, 224),
"acc@1": 85.708,
"acc@5": 97.730,
"metrics": {
"acc@1": 85.708,
"acc@5": 97.730,
},
},
)
DEFAULT = IMAGENET1K_SWAG_E2E_V1
......