Unverified commit ed2a0adb, authored by Nicolas Hug, committed by GitHub
Browse files

Cleanup weight docs (#7074)



* _weight_size -> _file_size

* Better formatting of individual weights tables

* Remove file size from main tables to avoid confusion with weight size (as in RAM)

* Remove unnecessary (file size) suffix

* Fix CI error?

* Formatting
Co-authored-by: Philip Meier <github.pmeier@posteo.de>
parent 90cfb10d
......@@ -553,7 +553,7 @@ class Raft_Large_Weights(WeightsEnum):
"Kitti-Train": {"per_image_epe": 5.0172, "fl_all": 17.4506},
},
"_ops": 211.007,
"_weight_size": 20.129,
"_file_size": 20.129,
"_docs": """These weights were ported from the original paper. They
are trained on :class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D`.""",
......@@ -573,7 +573,7 @@ class Raft_Large_Weights(WeightsEnum):
"Kitti-Train": {"per_image_epe": 4.5118, "fl_all": 16.0679},
},
"_ops": 211.007,
"_weight_size": 20.129,
"_file_size": 20.129,
"_docs": """These weights were trained from scratch on
:class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D`.""",
......@@ -593,7 +593,7 @@ class Raft_Large_Weights(WeightsEnum):
"Sintel-Test-Finalpass": {"epe": 3.18},
},
"_ops": 211.007,
"_weight_size": 20.129,
"_file_size": 20.129,
"_docs": """
These weights were ported from the original paper. They are
trained on :class:`~torchvision.datasets.FlyingChairs` +
......@@ -619,7 +619,7 @@ class Raft_Large_Weights(WeightsEnum):
"Sintel-Test-Finalpass": {"epe": 3.067},
},
"_ops": 211.007,
"_weight_size": 20.129,
"_file_size": 20.129,
"_docs": """
These weights were trained from scratch. They are
pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
......@@ -645,7 +645,7 @@ class Raft_Large_Weights(WeightsEnum):
"Kitti-Test": {"fl_all": 5.10},
},
"_ops": 211.007,
"_weight_size": 20.129,
"_file_size": 20.129,
"_docs": """
These weights were ported from the original paper. They are
pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
......@@ -668,7 +668,7 @@ class Raft_Large_Weights(WeightsEnum):
"Kitti-Test": {"fl_all": 5.19},
},
"_ops": 211.007,
"_weight_size": 20.129,
"_file_size": 20.129,
"_docs": """
These weights were trained from scratch. They are
pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
......@@ -711,7 +711,7 @@ class Raft_Small_Weights(WeightsEnum):
"Kitti-Train": {"per_image_epe": 7.6557, "fl_all": 25.2801},
},
"_ops": 47.655,
"_weight_size": 3.821,
"_file_size": 3.821,
"_docs": """These weights were ported from the original paper. They
are trained on :class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D`.""",
......@@ -730,7 +730,7 @@ class Raft_Small_Weights(WeightsEnum):
"Kitti-Train": {"per_image_epe": 7.5978, "fl_all": 25.2369},
},
"_ops": 47.655,
"_weight_size": 3.821,
"_file_size": 3.821,
"_docs": """These weights were trained from scratch on
:class:`~torchvision.datasets.FlyingChairs` +
:class:`~torchvision.datasets.FlyingThings3D`.""",
......
......@@ -124,7 +124,7 @@ class GoogLeNet_QuantizedWeights(WeightsEnum):
}
},
"_ops": 1.498,
"_weight_size": 12.618,
"_file_size": 12.618,
"_docs": """
These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
weights listed below.
......
......@@ -184,7 +184,7 @@ class Inception_V3_QuantizedWeights(WeightsEnum):
}
},
"_ops": 5.713,
"_weight_size": 23.146,
"_file_size": 23.146,
"_docs": """
These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
weights listed below.
......
......@@ -81,7 +81,7 @@ class MobileNet_V2_QuantizedWeights(WeightsEnum):
}
},
"_ops": 0.301,
"_weight_size": 3.423,
"_file_size": 3.423,
"_docs": """
These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized
weights listed below.
......
......@@ -176,7 +176,7 @@ class MobileNet_V3_Large_QuantizedWeights(WeightsEnum):
}
},
"_ops": 0.217,
"_weight_size": 21.554,
"_file_size": 21.554,
"_docs": """
These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized
weights listed below.
......
......@@ -176,7 +176,7 @@ class ResNet18_QuantizedWeights(WeightsEnum):
}
},
"_ops": 1.814,
"_weight_size": 11.238,
"_file_size": 11.238,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
......@@ -197,7 +197,7 @@ class ResNet50_QuantizedWeights(WeightsEnum):
}
},
"_ops": 4.089,
"_weight_size": 24.759,
"_file_size": 24.759,
},
)
IMAGENET1K_FBGEMM_V2 = Weights(
......@@ -214,7 +214,7 @@ class ResNet50_QuantizedWeights(WeightsEnum):
}
},
"_ops": 4.089,
"_weight_size": 24.953,
"_file_size": 24.953,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V2
......@@ -235,7 +235,7 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
}
},
"_ops": 16.414,
"_weight_size": 86.034,
"_file_size": 86.034,
},
)
IMAGENET1K_FBGEMM_V2 = Weights(
......@@ -252,7 +252,7 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
}
},
"_ops": 16.414,
"_weight_size": 86.645,
"_file_size": 86.645,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V2
......@@ -274,7 +274,7 @@ class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum):
}
},
"_ops": 15.46,
"_weight_size": 81.556,
"_file_size": 81.556,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
......
......@@ -140,7 +140,7 @@ class ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum):
}
},
"_ops": 0.04,
"_weight_size": 1.501,
"_file_size": 1.501,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
......@@ -161,7 +161,7 @@ class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum):
}
},
"_ops": 0.145,
"_weight_size": 2.334,
"_file_size": 2.334,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
......@@ -183,7 +183,7 @@ class ShuffleNet_V2_X1_5_QuantizedWeights(WeightsEnum):
}
},
"_ops": 0.296,
"_weight_size": 3.672,
"_file_size": 3.672,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
......@@ -205,7 +205,7 @@ class ShuffleNet_V2_X2_0_QuantizedWeights(WeightsEnum):
}
},
"_ops": 0.583,
"_weight_size": 7.467,
"_file_size": 7.467,
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
......
......@@ -429,7 +429,7 @@ class RegNet_Y_400MF_Weights(WeightsEnum):
}
},
"_ops": 0.402,
"_weight_size": 16.806,
"_file_size": 16.806,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -447,7 +447,7 @@ class RegNet_Y_400MF_Weights(WeightsEnum):
}
},
"_ops": 0.402,
"_weight_size": 16.806,
"_file_size": 16.806,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -473,7 +473,7 @@ class RegNet_Y_800MF_Weights(WeightsEnum):
}
},
"_ops": 0.834,
"_weight_size": 24.774,
"_file_size": 24.774,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -491,7 +491,7 @@ class RegNet_Y_800MF_Weights(WeightsEnum):
}
},
"_ops": 0.834,
"_weight_size": 24.774,
"_file_size": 24.774,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -517,7 +517,7 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum):
}
},
"_ops": 1.612,
"_weight_size": 43.152,
"_file_size": 43.152,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -535,7 +535,7 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum):
}
},
"_ops": 1.612,
"_weight_size": 43.152,
"_file_size": 43.152,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -561,7 +561,7 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum):
}
},
"_ops": 3.176,
"_weight_size": 74.567,
"_file_size": 74.567,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -579,7 +579,7 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum):
}
},
"_ops": 3.176,
"_weight_size": 74.567,
"_file_size": 74.567,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -605,7 +605,7 @@ class RegNet_Y_8GF_Weights(WeightsEnum):
}
},
"_ops": 8.473,
"_weight_size": 150.701,
"_file_size": 150.701,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -623,7 +623,7 @@ class RegNet_Y_8GF_Weights(WeightsEnum):
}
},
"_ops": 8.473,
"_weight_size": 150.701,
"_file_size": 150.701,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -649,7 +649,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
}
},
"_ops": 15.912,
"_weight_size": 319.49,
"_file_size": 319.49,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -667,7 +667,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
}
},
"_ops": 15.912,
"_weight_size": 319.49,
"_file_size": 319.49,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -690,7 +690,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
}
},
"_ops": 46.735,
"_weight_size": 319.49,
"_file_size": 319.49,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
......@@ -713,7 +713,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
}
},
"_ops": 15.912,
"_weight_size": 319.49,
"_file_size": 319.49,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
......@@ -738,7 +738,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
}
},
"_ops": 32.28,
"_weight_size": 554.076,
"_file_size": 554.076,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -756,7 +756,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
}
},
"_ops": 32.28,
"_weight_size": 554.076,
"_file_size": 554.076,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -779,7 +779,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
}
},
"_ops": 94.826,
"_weight_size": 554.076,
"_file_size": 554.076,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
......@@ -802,7 +802,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
}
},
"_ops": 32.28,
"_weight_size": 554.076,
"_file_size": 554.076,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
......@@ -828,7 +828,7 @@ class RegNet_Y_128GF_Weights(WeightsEnum):
}
},
"_ops": 374.57,
"_weight_size": 2461.564,
"_file_size": 2461.564,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
......@@ -851,7 +851,7 @@ class RegNet_Y_128GF_Weights(WeightsEnum):
}
},
"_ops": 127.518,
"_weight_size": 2461.564,
"_file_size": 2461.564,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
......@@ -876,7 +876,7 @@ class RegNet_X_400MF_Weights(WeightsEnum):
}
},
"_ops": 0.414,
"_weight_size": 21.258,
"_file_size": 21.258,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -894,7 +894,7 @@ class RegNet_X_400MF_Weights(WeightsEnum):
}
},
"_ops": 0.414,
"_weight_size": 21.257,
"_file_size": 21.257,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -920,7 +920,7 @@ class RegNet_X_800MF_Weights(WeightsEnum):
}
},
"_ops": 0.8,
"_weight_size": 27.945,
"_file_size": 27.945,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -938,7 +938,7 @@ class RegNet_X_800MF_Weights(WeightsEnum):
}
},
"_ops": 0.8,
"_weight_size": 27.945,
"_file_size": 27.945,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -964,7 +964,7 @@ class RegNet_X_1_6GF_Weights(WeightsEnum):
}
},
"_ops": 1.603,
"_weight_size": 35.339,
"_file_size": 35.339,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -982,7 +982,7 @@ class RegNet_X_1_6GF_Weights(WeightsEnum):
}
},
"_ops": 1.603,
"_weight_size": 35.339,
"_file_size": 35.339,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -1008,7 +1008,7 @@ class RegNet_X_3_2GF_Weights(WeightsEnum):
}
},
"_ops": 3.177,
"_weight_size": 58.756,
"_file_size": 58.756,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -1026,7 +1026,7 @@ class RegNet_X_3_2GF_Weights(WeightsEnum):
}
},
"_ops": 3.177,
"_weight_size": 58.756,
"_file_size": 58.756,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -1052,7 +1052,7 @@ class RegNet_X_8GF_Weights(WeightsEnum):
}
},
"_ops": 7.995,
"_weight_size": 151.456,
"_file_size": 151.456,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -1070,7 +1070,7 @@ class RegNet_X_8GF_Weights(WeightsEnum):
}
},
"_ops": 7.995,
"_weight_size": 151.456,
"_file_size": 151.456,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -1096,7 +1096,7 @@ class RegNet_X_16GF_Weights(WeightsEnum):
}
},
"_ops": 15.941,
"_weight_size": 207.627,
"_file_size": 207.627,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -1114,7 +1114,7 @@ class RegNet_X_16GF_Weights(WeightsEnum):
}
},
"_ops": 15.941,
"_weight_size": 207.627,
"_file_size": 207.627,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......@@ -1140,7 +1140,7 @@ class RegNet_X_32GF_Weights(WeightsEnum):
}
},
"_ops": 31.736,
"_weight_size": 412.039,
"_file_size": 412.039,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -1158,7 +1158,7 @@ class RegNet_X_32GF_Weights(WeightsEnum):
}
},
"_ops": 31.736,
"_weight_size": 412.039,
"_file_size": 412.039,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
......
......@@ -324,7 +324,7 @@ class ResNet18_Weights(WeightsEnum):
}
},
"_ops": 1.814,
"_weight_size": 44.661,
"_file_size": 44.661,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -346,7 +346,7 @@ class ResNet34_Weights(WeightsEnum):
}
},
"_ops": 3.664,
"_weight_size": 83.275,
"_file_size": 83.275,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -368,7 +368,7 @@ class ResNet50_Weights(WeightsEnum):
}
},
"_ops": 4.089,
"_weight_size": 97.781,
"_file_size": 97.781,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -386,7 +386,7 @@ class ResNet50_Weights(WeightsEnum):
}
},
"_ops": 4.089,
"_weight_size": 97.79,
"_file_size": 97.79,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
......@@ -411,7 +411,7 @@ class ResNet101_Weights(WeightsEnum):
}
},
"_ops": 7.801,
"_weight_size": 170.511,
"_file_size": 170.511,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -429,7 +429,7 @@ class ResNet101_Weights(WeightsEnum):
}
},
"_ops": 7.801,
"_weight_size": 170.53,
"_file_size": 170.53,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
......@@ -454,7 +454,7 @@ class ResNet152_Weights(WeightsEnum):
}
},
"_ops": 11.514,
"_weight_size": 230.434,
"_file_size": 230.434,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -472,7 +472,7 @@ class ResNet152_Weights(WeightsEnum):
}
},
"_ops": 11.514,
"_weight_size": 230.474,
"_file_size": 230.474,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
......@@ -497,7 +497,7 @@ class ResNeXt50_32X4D_Weights(WeightsEnum):
}
},
"_ops": 4.23,
"_weight_size": 95.789,
"_file_size": 95.789,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -515,7 +515,7 @@ class ResNeXt50_32X4D_Weights(WeightsEnum):
}
},
"_ops": 4.23,
"_weight_size": 95.833,
"_file_size": 95.833,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
......@@ -540,7 +540,7 @@ class ResNeXt101_32X8D_Weights(WeightsEnum):
}
},
"_ops": 16.414,
"_weight_size": 339.586,
"_file_size": 339.586,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -558,7 +558,7 @@ class ResNeXt101_32X8D_Weights(WeightsEnum):
}
},
"_ops": 16.414,
"_weight_size": 339.673,
"_file_size": 339.673,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
......@@ -583,7 +583,7 @@ class ResNeXt101_64X4D_Weights(WeightsEnum):
}
},
"_ops": 15.46,
"_weight_size": 319.318,
"_file_size": 319.318,
"_docs": """
These weights were trained from scratch by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
......@@ -608,7 +608,7 @@ class Wide_ResNet50_2_Weights(WeightsEnum):
}
},
"_ops": 11.398,
"_weight_size": 131.82,
"_file_size": 131.82,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -626,7 +626,7 @@ class Wide_ResNet50_2_Weights(WeightsEnum):
}
},
"_ops": 11.398,
"_weight_size": 263.124,
"_file_size": 263.124,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
......@@ -651,7 +651,7 @@ class Wide_ResNet101_2_Weights(WeightsEnum):
}
},
"_ops": 22.753,
"_weight_size": 242.896,
"_file_size": 242.896,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
......@@ -669,7 +669,7 @@ class Wide_ResNet101_2_Weights(WeightsEnum):
}
},
"_ops": 22.753,
"_weight_size": 484.747,
"_file_size": 484.747,
"_docs": """
These weights improve upon the results of the original paper by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
......
......@@ -153,7 +153,7 @@ class DeepLabV3_ResNet50_Weights(WeightsEnum):
}
},
"_ops": 178.722,
"_weight_size": 160.515,
"_file_size": 160.515,
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
......@@ -174,7 +174,7 @@ class DeepLabV3_ResNet101_Weights(WeightsEnum):
}
},
"_ops": 258.743,
"_weight_size": 233.217,
"_file_size": 233.217,
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
......@@ -195,7 +195,7 @@ class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum):
}
},
"_ops": 10.452,
"_weight_size": 42.301,
"_file_size": 42.301,
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
......
......@@ -72,7 +72,7 @@ class FCN_ResNet50_Weights(WeightsEnum):
}
},
"_ops": 152.717,
"_weight_size": 135.009,
"_file_size": 135.009,
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
......@@ -93,7 +93,7 @@ class FCN_ResNet101_Weights(WeightsEnum):
}
},
"_ops": 232.738,
"_weight_size": 207.711,
"_file_size": 207.711,
},
)
DEFAULT = COCO_WITH_VOC_LABELS_V1
......
......@@ -109,7 +109,7 @@ class LRASPP_MobileNet_V3_Large_Weights(WeightsEnum):
}
},
"_ops": 2.086,
"_weight_size": 12.49,
"_file_size": 12.49,
"_docs": """
These weights were trained on a subset of COCO, using only the 20 categories that are present in the
Pascal VOC dataset.
......
......@@ -205,7 +205,7 @@ class ShuffleNet_V2_X0_5_Weights(WeightsEnum):
}
},
"_ops": 0.04,
"_weight_size": 5.282,
"_file_size": 5.282,
"_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
},
)
......@@ -227,7 +227,7 @@ class ShuffleNet_V2_X1_0_Weights(WeightsEnum):
}
},
"_ops": 0.145,
"_weight_size": 8.791,
"_file_size": 8.791,
"_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
},
)
......@@ -249,7 +249,7 @@ class ShuffleNet_V2_X1_5_Weights(WeightsEnum):
}
},
"_ops": 0.296,
"_weight_size": 13.557,
"_file_size": 13.557,
"_docs": """
These weights were trained from scratch by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
......@@ -274,7 +274,7 @@ class ShuffleNet_V2_X2_0_Weights(WeightsEnum):
}
},
"_ops": 0.583,
"_weight_size": 28.433,
"_file_size": 28.433,
"_docs": """
These weights were trained from scratch by using TorchVision's `new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
......
......@@ -136,7 +136,7 @@ class SqueezeNet1_0_Weights(WeightsEnum):
}
},
"_ops": 0.819,
"_weight_size": 4.778,
"_file_size": 4.778,
},
)
DEFAULT = IMAGENET1K_V1
......@@ -157,7 +157,7 @@ class SqueezeNet1_1_Weights(WeightsEnum):
}
},
"_ops": 0.349,
"_weight_size": 4.729,
"_file_size": 4.729,
},
)
DEFAULT = IMAGENET1K_V1
......
......@@ -663,7 +663,7 @@ class Swin_T_Weights(WeightsEnum):
}
},
"_ops": 4.491,
"_weight_size": 108.19,
"_file_size": 108.19,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
......@@ -688,7 +688,7 @@ class Swin_S_Weights(WeightsEnum):
}
},
"_ops": 8.741,
"_weight_size": 189.786,
"_file_size": 189.786,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
......@@ -713,7 +713,7 @@ class Swin_B_Weights(WeightsEnum):
}
},
"_ops": 15.431,
"_weight_size": 335.364,
"_file_size": 335.364,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
......@@ -738,7 +738,7 @@ class Swin_V2_T_Weights(WeightsEnum):
}
},
"_ops": 5.94,
"_weight_size": 108.626,
"_file_size": 108.626,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
......@@ -763,7 +763,7 @@ class Swin_V2_S_Weights(WeightsEnum):
}
},
"_ops": 11.546,
"_weight_size": 190.675,
"_file_size": 190.675,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
......@@ -788,7 +788,7 @@ class Swin_V2_B_Weights(WeightsEnum):
}
},
"_ops": 20.325,
"_weight_size": 336.372,
"_file_size": 336.372,
"_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
},
)
......
......@@ -128,7 +128,7 @@ class VGG11_Weights(WeightsEnum):
}
},
"_ops": 7.609,
"_weight_size": 506.84,
"_file_size": 506.84,
},
)
DEFAULT = IMAGENET1K_V1
......@@ -148,7 +148,7 @@ class VGG11_BN_Weights(WeightsEnum):
}
},
"_ops": 7.609,
"_weight_size": 506.881,
"_file_size": 506.881,
},
)
DEFAULT = IMAGENET1K_V1
......@@ -168,7 +168,7 @@ class VGG13_Weights(WeightsEnum):
}
},
"_ops": 11.308,
"_weight_size": 507.545,
"_file_size": 507.545,
},
)
DEFAULT = IMAGENET1K_V1
......@@ -188,7 +188,7 @@ class VGG13_BN_Weights(WeightsEnum):
}
},
"_ops": 11.308,
"_weight_size": 507.59,
"_file_size": 507.59,
},
)
DEFAULT = IMAGENET1K_V1
......@@ -208,7 +208,7 @@ class VGG16_Weights(WeightsEnum):
}
},
"_ops": 15.47,
"_weight_size": 527.796,
"_file_size": 527.796,
},
)
IMAGENET1K_FEATURES = Weights(
......@@ -232,7 +232,7 @@ class VGG16_Weights(WeightsEnum):
}
},
"_ops": 15.47,
"_weight_size": 527.802,
"_file_size": 527.802,
"_docs": """
These weights can't be used for classification because they are missing values in the `classifier`
module. Only the `features` module has valid values and can be used for feature extraction. The weights
......@@ -257,7 +257,7 @@ class VGG16_BN_Weights(WeightsEnum):
}
},
"_ops": 15.47,
"_weight_size": 527.866,
"_file_size": 527.866,
},
)
DEFAULT = IMAGENET1K_V1
......@@ -277,7 +277,7 @@ class VGG19_Weights(WeightsEnum):
}
},
"_ops": 19.632,
"_weight_size": 548.051,
"_file_size": 548.051,
},
)
DEFAULT = IMAGENET1K_V1
......@@ -297,7 +297,7 @@ class VGG19_BN_Weights(WeightsEnum):
}
},
"_ops": 19.632,
"_weight_size": 548.143,
"_file_size": 548.143,
},
)
DEFAULT = IMAGENET1K_V1
......
......@@ -625,7 +625,7 @@ class MViT_V1_B_Weights(WeightsEnum):
}
},
"_ops": 70.599,
"_weight_size": 139.764,
"_file_size": 139.764,
},
)
DEFAULT = KINETICS400_V1
......@@ -658,7 +658,7 @@ class MViT_V2_S_Weights(WeightsEnum):
}
},
"_ops": 64.224,
"_weight_size": 131.884,
"_file_size": 131.884,
},
)
DEFAULT = KINETICS400_V1
......
......@@ -333,7 +333,7 @@ class R3D_18_Weights(WeightsEnum):
}
},
"_ops": 40.697,
"_weight_size": 127.359,
"_file_size": 127.359,
},
)
DEFAULT = KINETICS400_V1
......@@ -353,7 +353,7 @@ class MC3_18_Weights(WeightsEnum):
}
},
"_ops": 43.343,
"_weight_size": 44.672,
"_file_size": 44.672,
},
)
DEFAULT = KINETICS400_V1
......@@ -373,7 +373,7 @@ class R2Plus1D_18_Weights(WeightsEnum):
}
},
"_ops": 40.519,
"_weight_size": 120.318,
"_file_size": 120.318,
},
)
DEFAULT = KINETICS400_V1
......
......@@ -176,7 +176,7 @@ class S3D_Weights(WeightsEnum):
}
},
"_ops": 17.979,
"_weight_size": 31.972,
"_file_size": 31.972,
},
)
DEFAULT = KINETICS400_V1
......
......@@ -531,7 +531,7 @@ class Swin3D_T_Weights(WeightsEnum):
}
},
"_ops": 43.882,
"_weight_size": 121.543,
"_file_size": 121.543,
},
)
DEFAULT = KINETICS400_V1
......@@ -562,7 +562,7 @@ class Swin3D_S_Weights(WeightsEnum):
}
},
"_ops": 82.841,
"_weight_size": 218.288,
"_file_size": 218.288,
},
)
DEFAULT = KINETICS400_V1
......@@ -593,7 +593,7 @@ class Swin3D_B_Weights(WeightsEnum):
}
},
"_ops": 140.667,
"_weight_size": 364.134,
"_file_size": 364.134,
},
)
KINETICS400_IMAGENET22K_V1 = Weights(
......@@ -620,7 +620,7 @@ class Swin3D_B_Weights(WeightsEnum):
}
},
"_ops": 140.667,
"_weight_size": 364.134,
"_file_size": 364.134,
},
)
DEFAULT = KINETICS400_V1
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment