"docs/source/git@developer.sourcefind.cn:OpenDAS/vision.git" did not exist on "f861003c8819cd19737ba0f1cbfbc08f408404db"
Unverified commit deba0562 authored by toni057, committed by GitHub

Adding FLOPs and size to model metadata (#6936)



* Adding FLOPs and size to model metadata

* Adding weight size to quantization models

* Small refactor of rich metadata

* Removing unused code

* Fixing wrong entries

* Adding .DS_Store to gitignore

* Renaming _flops to _ops

* Adding number of operations to quantization models

* Reflecting _flops change to _ops

* Renamed ops and weight size in individual model doc pages

* Linter fixes

* Rounding ops to first decimal

* Rounding num ops and sizes to 3 decimals

* Change naming of columns.

* Update tables
Co-authored-by: Toni Blaslov <tblaslov@fb.com>
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent ad2eceab
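The two new fields travel with the rest of the weights metadata. As a minimal sketch of reading them back (the values in the comments are the AlexNet entries from this diff; `Weights.meta` is the existing torchvision mechanism, while `_ops` and `_weight_size` are the private keys this commit introduces):

    from torchvision.models import AlexNet_Weights

    w = AlexNet_Weights.IMAGENET1K_V1
    print(w.meta["num_params"])    # number of parameters (61.1M for this checkpoint)
    print(w.meta["_ops"])          # 0.714 -> GFLOPs (GIPS for *_QuantizedWeights enums)
    print(w.meta["_weight_size"])  # 233.087 -> checkpoint file size in MB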
@@ -362,6 +362,14 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
                 max_visible = 3
                 v_sample = ", ".join(v[:max_visible])
                 v = f"{v_sample}, ... ({len(v)-max_visible} omitted)" if len(v) > max_visible else v_sample
+            elif k == "_ops":
+                if obj.__name__.endswith("_QuantizedWeights"):
+                    v = f"{v} giga instructions per sec"
+                else:
+                    v = f"{v} giga floating-point operations per sec"
+            elif k == "_weight_size":
+                v = f"{v} MB (file size)"
             table.append((str(k), str(v)))

         table = tabulate(table, tablefmt="rst")
         lines += [".. rst-class:: table-weights"]  # Custom CSS class, see custom_torchvision.css
@@ -385,19 +393,30 @@ def generate_weights_table(module, table_name, metrics, dataset, include_pattern
     if exclude_patterns is not None:
         weights = [w for w in weights if all(p not in str(w) for p in exclude_patterns)]

+    ops_name = "GIPS" if "QuantizedWeights" in weights_endswith else "GFLOPS"
+
     metrics_keys, metrics_names = zip(*metrics)
-    column_names = ["Weight"] + list(metrics_names) + ["Params", "Recipe"]
+    column_names = (
+        ["Weight"] + list(metrics_names) + ["Params"] + [ops_name, "Size (MB)", "Recipe"]
+    )  # Final column order
     column_names = [f"**{name}**" for name in column_names]  # Add bold

-    content = [
-        (
+    content = []
+    for w in weights:
+        row = [
             f":class:`{w} <{type(w).__name__}>`",
             *(w.meta["_metrics"][dataset][metric] for metric in metrics_keys),
             f"{w.meta['num_params']/1e6:.1f}M",
+            f"{w.meta['_ops']:.3f}",
+            f"{round(w.meta['_weight_size'], 1):.1f}",
             f"`link <{w.meta['recipe']}>`__",
-        )
-        for w in weights
-    ]
+        ]
+        content.append(row)
+
+    column_widths = ["110"] + ["18"] * len(metrics_names) + ["18"] * 3 + ["10"]
+    widths_table = " ".join(column_widths)

     table = tabulate(content, headers=column_names, tablefmt="rst")

     generated_dir = Path("generated")

@@ -405,7 +424,7 @@ def generate_weights_table(module, table_name, metrics, dataset, include_pattern
     with open(generated_dir / f"{table_name}_table.rst", "w+") as table_file:
         table_file.write(".. rst-class:: table-weights\n")  # Custom CSS class, see custom_torchvision.css
         table_file.write(".. table::\n")
-        table_file.write(f"    :widths: 100 {'20 ' * len(metrics_names)} 20 10\n\n")
+        table_file.write(f"    :widths: {widths_table} \n\n")
         table_file.write(f"{textwrap.indent(table, ' ' * 4)}\n\n")
......
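For reference, the rst table used above comes from the `tabulate` package. A hedged, self-contained sketch of what `tablefmt="rst"` emits, using a single made-up row built from the AlexNet numbers in this diff and only a subset of the real columns:

    from tabulate import tabulate

    headers = ["**Weight**", "**Acc@5**", "**Params**", "**GFLOPS**", "**Size (MB)**"]
    rows = [["AlexNet_Weights.IMAGENET1K_V1", 79.066, "61.1M", "0.714", "233.1"]]
    # Prints an RST "simple table" (rows delimited by ==== rules), which Sphinx
    # renders the same way as the generated weight tables in the docs.
    print(tabulate(rows, headers=headers, tablefmt="rst"))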
@@ -155,11 +155,13 @@ def test_schema_meta_validation(model_fn):
         "recipe",
         "unquantized",
         "_docs",
+        "_ops",
+        "_weight_size",
     }
     # mandatory fields for each computer vision task
     classification_fields = {"categories", ("_metrics", "ImageNet-1K", "acc@1"), ("_metrics", "ImageNet-1K", "acc@5")}
     defaults = {
-        "all": {"_metrics", "min_size", "num_params", "recipe", "_docs"},
+        "all": {"_metrics", "min_size", "num_params", "recipe", "_docs", "_weight_size", "_ops"},
         "models": classification_fields,
         "detection": {"categories", ("_metrics", "COCO-val2017", "box_map")},
         "quantization": classification_fields | {"backend", "unquantized"},
......
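Beyond the schema test above, the same invariant can be spot-checked from the public API. A hedged sketch, assuming the model registration helpers available in recent torchvision (not part of this PR):

    from torchvision.models import get_model_weights, list_models

    # Every registered weight entry should now carry the two new metadata keys.
    for name in list_models():
        for w in get_model_weights(name):
            assert "_ops" in w.meta and "_weight_size" in w.meta, f"{w} is missing ops/size metadata"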
@@ -67,6 +67,8 @@ class AlexNet_Weights(WeightsEnum):
                     "acc@5": 79.066,
                 }
             },
+            "_ops": 0.714,
+            "_weight_size": 233.087,
             "_docs": """
                 These weights reproduce closely the results of the paper using a simplified training recipe.
             """,
......
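The 0.714 / 233.087 entries above can be roughly reproduced outside the docs build. A hedged sketch (not necessarily the script used to generate the numbers in this PR; `fvcore` is an assumption here, and the in-memory size is only an approximation of the serialized file size):

    import torch
    from fvcore.nn import FlopCountAnalysis
    from torchvision.models import alexnet, AlexNet_Weights

    model = alexnet(weights=AlexNet_Weights.IMAGENET1K_V1).eval()

    # Multiply-accumulate count for one 224x224 forward pass, in giga ops.
    gops = FlopCountAnalysis(model, torch.randn(1, 3, 224, 224)).total() / 1e9

    # Approximate the checkpoint size from the tensors in the state dict, in MB.
    size_mb = sum(t.numel() * t.element_size() for t in model.state_dict().values()) / 1024**2

    print(round(gops, 3), round(size_mb, 3))  # should land close to 0.714 and 233.087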
@@ -219,6 +219,8 @@ class ConvNeXt_Tiny_Weights(WeightsEnum):
                     "acc@5": 96.146,
                 }
             },
+            "_ops": 4.456,
+            "_weight_size": 109.119,
         },
     )
     DEFAULT = IMAGENET1K_V1

@@ -237,6 +239,8 @@ class ConvNeXt_Small_Weights(WeightsEnum):
                     "acc@5": 96.650,
                 }
             },
+            "_ops": 8.684,
+            "_weight_size": 191.703,
         },
     )
     DEFAULT = IMAGENET1K_V1

@@ -255,6 +259,8 @@ class ConvNeXt_Base_Weights(WeightsEnum):
                     "acc@5": 96.870,
                 }
             },
+            "_ops": 15.355,
+            "_weight_size": 338.064,
         },
     )
     DEFAULT = IMAGENET1K_V1

@@ -273,6 +279,8 @@ class ConvNeXt_Large_Weights(WeightsEnum):
                     "acc@5": 96.976,
                 }
             },
+            "_ops": 34.361,
+            "_weight_size": 754.537,
         },
     )
     DEFAULT = IMAGENET1K_V1
......
@@ -15,7 +15,6 @@ from ._api import register_model, Weights, WeightsEnum
 from ._meta import _IMAGENET_CATEGORIES
 from ._utils import _ovewrite_named_param, handle_legacy_interface

 __all__ = [
     "DenseNet",
     "DenseNet121_Weights",

@@ -278,6 +277,8 @@ class DenseNet121_Weights(WeightsEnum):
                     "acc@5": 91.972,
                 }
             },
+            "_ops": 2.834,
+            "_weight_size": 30.845,
         },
     )
     DEFAULT = IMAGENET1K_V1

@@ -296,6 +297,8 @@ class DenseNet161_Weights(WeightsEnum):
                     "acc@5": 93.560,
                 }
             },
+            "_ops": 7.728,
+            "_weight_size": 110.369,
         },
     )
     DEFAULT = IMAGENET1K_V1

@@ -314,6 +317,8 @@ class DenseNet169_Weights(WeightsEnum):
                     "acc@5": 92.806,
                 }
             },
+            "_ops": 3.36,
+            "_weight_size": 54.708,
         },
     )
     DEFAULT = IMAGENET1K_V1

@@ -332,6 +337,8 @@ class DenseNet201_Weights(WeightsEnum):
                     "acc@5": 93.370,
                 }
             },
+            "_ops": 4.291,
+            "_weight_size": 77.373,
         },
     )
     DEFAULT = IMAGENET1K_V1

@@ -444,7 +451,6 @@ def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool
 # The dictionary below is internal implementation detail and will be removed in v0.15
 from ._utils import _ModelURLs

 model_urls = _ModelURLs(
     {
         "densenet121": DenseNet121_Weights.IMAGENET1K_V1.url,
......
@@ -388,6 +388,8 @@ class FasterRCNN_ResNet50_FPN_Weights(WeightsEnum):
                     "box_map": 37.0,
                 }
             },
+            "_ops": 134.38,
+            "_weight_size": 159.743,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )

@@ -407,6 +409,8 @@ class FasterRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
                     "box_map": 46.7,
                 }
             },
+            "_ops": 280.371,
+            "_weight_size": 167.104,
             "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
         },
     )

@@ -426,6 +430,8 @@ class FasterRCNN_MobileNet_V3_Large_FPN_Weights(WeightsEnum):
                     "box_map": 32.8,
                 }
             },
+            "_ops": 4.494,
+            "_weight_size": 74.239,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )

@@ -445,6 +451,8 @@ class FasterRCNN_MobileNet_V3_Large_320_FPN_Weights(WeightsEnum):
                     "box_map": 22.8,
                 }
             },
+            "_ops": 0.719,
+            "_weight_size": 74.239,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
......
@@ -662,6 +662,8 @@ class FCOS_ResNet50_FPN_Weights(WeightsEnum):
                     "box_map": 39.2,
                 }
             },
+            "_ops": 128.207,
+            "_weight_size": 123.608,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
......
@@ -328,6 +328,8 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum):
                     "kp_map": 61.1,
                 }
             },
+            "_ops": 133.924,
+            "_weight_size": 226.054,
             "_docs": """
                 These weights were produced by following a similar training recipe as on the paper but use a checkpoint
                 from an early epoch.

@@ -347,6 +349,8 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum):
                     "kp_map": 65.0,
                 }
             },
+            "_ops": 137.42,
+            "_weight_size": 226.054,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
......
@@ -370,6 +370,8 @@ class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum):
                     "mask_map": 34.6,
                 }
             },
+            "_ops": 134.38,
+            "_weight_size": 169.84,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )

@@ -390,6 +392,8 @@ class MaskRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
                     "mask_map": 41.8,
                 }
             },
+            "_ops": 333.577,
+            "_weight_size": 177.219,
             "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
         },
     )
......
@@ -690,6 +690,8 @@ class RetinaNet_ResNet50_FPN_Weights(WeightsEnum):
                     "box_map": 36.4,
                 }
             },
+            "_ops": 151.54,
+            "_weight_size": 130.267,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )

@@ -709,6 +711,8 @@ class RetinaNet_ResNet50_FPN_V2_Weights(WeightsEnum):
                     "box_map": 41.5,
                 }
             },
+            "_ops": 152.238,
+            "_weight_size": 146.037,
             "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
         },
     )
......
@@ -39,6 +39,8 @@ class SSD300_VGG16_Weights(WeightsEnum):
                     "box_map": 25.1,
                 }
             },
+            "_ops": 34.858,
+            "_weight_size": 135.988,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
......
@@ -198,6 +198,8 @@ class SSDLite320_MobileNet_V3_Large_Weights(WeightsEnum):
                     "box_map": 21.3,
                 }
             },
+            "_ops": 0.583,
+            "_weight_size": 13.418,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
......
@@ -464,6 +464,8 @@ class EfficientNet_B0_Weights(WeightsEnum):
                     "acc@5": 93.532,
                 }
             },
+            "_ops": 0.386,
+            "_weight_size": 20.451,
             "_docs": """These weights are ported from the original paper.""",
         },
     )

@@ -486,6 +488,8 @@ class EfficientNet_B1_Weights(WeightsEnum):
                     "acc@5": 94.186,
                 }
             },
+            "_ops": 0.687,
+            "_weight_size": 30.134,
             "_docs": """These weights are ported from the original paper.""",
         },
     )

@@ -504,6 +508,8 @@ class EfficientNet_B1_Weights(WeightsEnum):
                     "acc@5": 94.934,
                 }
             },
+            "_ops": 0.687,
+            "_weight_size": 30.136,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of TorchVision's
                 `new training recipe

@@ -530,6 +536,8 @@ class EfficientNet_B2_Weights(WeightsEnum):
                     "acc@5": 95.310,
                 }
             },
+            "_ops": 1.088,
+            "_weight_size": 35.174,
             "_docs": """These weights are ported from the original paper.""",
         },
     )

@@ -552,6 +560,8 @@ class EfficientNet_B3_Weights(WeightsEnum):
                     "acc@5": 96.054,
                 }
             },
+            "_ops": 1.827,
+            "_weight_size": 47.184,
             "_docs": """These weights are ported from the original paper.""",
         },
     )

@@ -574,6 +584,8 @@ class EfficientNet_B4_Weights(WeightsEnum):
                     "acc@5": 96.594,
                 }
             },
+            "_ops": 4.394,
+            "_weight_size": 74.489,
             "_docs": """These weights are ported from the original paper.""",
         },
     )

@@ -596,6 +608,8 @@ class EfficientNet_B5_Weights(WeightsEnum):
                     "acc@5": 96.628,
                 }
             },
+            "_ops": 10.266,
+            "_weight_size": 116.864,
             "_docs": """These weights are ported from the original paper.""",
         },
     )

@@ -618,6 +632,8 @@ class EfficientNet_B6_Weights(WeightsEnum):
                     "acc@5": 96.916,
                 }
             },
+            "_ops": 19.068,
+            "_weight_size": 165.362,
             "_docs": """These weights are ported from the original paper.""",
         },
     )

@@ -640,6 +656,8 @@ class EfficientNet_B7_Weights(WeightsEnum):
                     "acc@5": 96.908,
                 }
             },
+            "_ops": 37.746,
+            "_weight_size": 254.675,
             "_docs": """These weights are ported from the original paper.""",
         },
     )

@@ -664,6 +682,8 @@ class EfficientNet_V2_S_Weights(WeightsEnum):
                     "acc@5": 96.878,
                 }
             },
+            "_ops": 8.366,
+            "_weight_size": 82.704,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of TorchVision's
                 `new training recipe

@@ -692,6 +712,8 @@ class EfficientNet_V2_M_Weights(WeightsEnum):
                     "acc@5": 97.156,
                 }
             },
+            "_ops": 24.582,
+            "_weight_size": 208.01,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of TorchVision's
                 `new training recipe

@@ -723,6 +745,8 @@ class EfficientNet_V2_L_Weights(WeightsEnum):
                     "acc@5": 97.788,
                 }
             },
+            "_ops": 56.08,
+            "_weight_size": 454.573,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
......
@@ -290,6 +290,8 @@ class GoogLeNet_Weights(WeightsEnum):
                     "acc@5": 89.530,
                 }
             },
+            "_ops": 1.498,
+            "_weight_size": 49.731,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
......
@@ -422,6 +422,8 @@ class Inception_V3_Weights(WeightsEnum):
                     "acc@5": 93.450,
                 }
             },
+            "_ops": 5.713,
+            "_weight_size": 103.903,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
......
@@ -785,6 +785,8 @@ class MaxVit_T_Weights(WeightsEnum):
                     "acc@5": 96.722,
                 }
             },
+            "_ops": 5.558,
+            "_weight_size": 118.769,
             "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
         },
     )
......
@@ -231,6 +231,8 @@ class MNASNet0_5_Weights(WeightsEnum):
                     "acc@5": 87.490,
                 }
             },
+            "_ops": 0.104,
+            "_weight_size": 8.591,
             "_docs": """These weights reproduce closely the results of the paper.""",
         },
     )

@@ -251,6 +253,8 @@ class MNASNet0_75_Weights(WeightsEnum):
                     "acc@5": 90.496,
                 }
             },
+            "_ops": 0.215,
+            "_weight_size": 12.303,
             "_docs": """
                 These weights were trained from scratch by using TorchVision's `new training recipe
                 <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.

@@ -273,6 +277,8 @@ class MNASNet1_0_Weights(WeightsEnum):
                     "acc@5": 91.510,
                 }
             },
+            "_ops": 0.314,
+            "_weight_size": 16.915,
             "_docs": """These weights reproduce closely the results of the paper.""",
         },
     )

@@ -293,6 +299,8 @@ class MNASNet1_3_Weights(WeightsEnum):
                     "acc@5": 93.522,
                 }
             },
+            "_ops": 0.526,
+            "_weight_size": 24.246,
             "_docs": """
                 These weights were trained from scratch by using TorchVision's `new training recipe
                 <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
......
@@ -194,6 +194,8 @@ class MobileNet_V2_Weights(WeightsEnum):
                     "acc@5": 90.286,
                 }
             },
+            "_ops": 0.301,
+            "_weight_size": 13.555,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )

@@ -209,6 +211,8 @@ class MobileNet_V2_Weights(WeightsEnum):
                     "acc@5": 90.822,
                 }
             },
+            "_ops": 0.301,
+            "_weight_size": 13.598,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of TorchVision's
                 `new training recipe
......
@@ -307,6 +307,8 @@ class MobileNet_V3_Large_Weights(WeightsEnum):
                     "acc@5": 91.340,
                 }
             },
+            "_ops": 0.217,
+            "_weight_size": 21.114,
             "_docs": """These weights were trained from scratch by using a simple training recipe.""",
         },
     )

@@ -323,6 +325,8 @@ class MobileNet_V3_Large_Weights(WeightsEnum):
                     "acc@5": 92.566,
                 }
             },
+            "_ops": 0.217,
+            "_weight_size": 21.107,
             "_docs": """
                 These weights improve marginally upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe

@@ -347,6 +351,8 @@ class MobileNet_V3_Small_Weights(WeightsEnum):
                     "acc@5": 87.402,
                 }
             },
+            "_ops": 0.057,
+            "_weight_size": 9.829,
             "_docs": """
                 These weights improve upon the results of the original paper by using a simple training recipe.
             """,
......
@@ -552,6 +552,8 @@ class Raft_Large_Weights(WeightsEnum):
                 "Sintel-Train-Finalpass": {"epe": 2.7894},
                 "Kitti-Train": {"per_image_epe": 5.0172, "fl_all": 17.4506},
             },
+            "_ops": 211.007,
+            "_weight_size": 20.129,
             "_docs": """These weights were ported from the original paper. They
                 are trained on :class:`~torchvision.datasets.FlyingChairs` +
                 :class:`~torchvision.datasets.FlyingThings3D`.""",

@@ -570,6 +572,8 @@ class Raft_Large_Weights(WeightsEnum):
                 "Sintel-Train-Finalpass": {"epe": 2.7161},
                 "Kitti-Train": {"per_image_epe": 4.5118, "fl_all": 16.0679},
             },
+            "_ops": 211.007,
+            "_weight_size": 20.129,
             "_docs": """These weights were trained from scratch on
                 :class:`~torchvision.datasets.FlyingChairs` +
                 :class:`~torchvision.datasets.FlyingThings3D`.""",

@@ -588,6 +592,8 @@ class Raft_Large_Weights(WeightsEnum):
                 "Sintel-Test-Cleanpass": {"epe": 1.94},
                 "Sintel-Test-Finalpass": {"epe": 3.18},
             },
+            "_ops": 211.007,
+            "_weight_size": 20.129,
             "_docs": """
                 These weights were ported from the original paper. They are
                 trained on :class:`~torchvision.datasets.FlyingChairs` +

@@ -612,6 +618,8 @@ class Raft_Large_Weights(WeightsEnum):
                 "Sintel-Test-Cleanpass": {"epe": 1.819},
                 "Sintel-Test-Finalpass": {"epe": 3.067},
             },
+            "_ops": 211.007,
+            "_weight_size": 20.129,
             "_docs": """
                 These weights were trained from scratch. They are
                 pre-trained on :class:`~torchvision.datasets.FlyingChairs` +

@@ -636,6 +644,8 @@ class Raft_Large_Weights(WeightsEnum):
             "_metrics": {
                 "Kitti-Test": {"fl_all": 5.10},
             },
+            "_ops": 211.007,
+            "_weight_size": 20.129,
             "_docs": """
                 These weights were ported from the original paper. They are
                 pre-trained on :class:`~torchvision.datasets.FlyingChairs` +

@@ -657,6 +667,8 @@ class Raft_Large_Weights(WeightsEnum):
             "_metrics": {
                 "Kitti-Test": {"fl_all": 5.19},
             },
+            "_ops": 211.007,
+            "_weight_size": 20.129,
             "_docs": """
                 These weights were trained from scratch. They are
                 pre-trained on :class:`~torchvision.datasets.FlyingChairs` +

@@ -698,6 +710,8 @@ class Raft_Small_Weights(WeightsEnum):
                 "Sintel-Train-Finalpass": {"epe": 3.2790},
                 "Kitti-Train": {"per_image_epe": 7.6557, "fl_all": 25.2801},
             },
+            "_ops": 47.655,
+            "_weight_size": 3.821,
             "_docs": """These weights were ported from the original paper. They
                 are trained on :class:`~torchvision.datasets.FlyingChairs` +
                 :class:`~torchvision.datasets.FlyingThings3D`.""",

@@ -715,6 +729,8 @@ class Raft_Small_Weights(WeightsEnum):
                 "Sintel-Train-Finalpass": {"epe": 3.2831},
                 "Kitti-Train": {"per_image_epe": 7.5978, "fl_all": 25.2369},
             },
+            "_ops": 47.655,
+            "_weight_size": 3.821,
             "_docs": """These weights were trained from scratch on
                 :class:`~torchvision.datasets.FlyingChairs` +
                 :class:`~torchvision.datasets.FlyingThings3D`.""",
......