"git@developer.sourcefind.cn:OpenDAS/vision.git" did not exist on "6397190105cf7392db42bb8c04816c22f8b5ccc7"
Unverified commit c67a5839, authored by Vasilis Vryniotis and committed by GitHub

Clean up model documentation (#6003)

* Remove old "minimum input size" from docstrings.

* Remove "currently only XYZ weights available"

* Fix description of wide_resnet101_2

* Display license URLs as links.

* Clarify the order of dims of min_size.

* Remove lengthy keypoint_names from meta-table.
parent ac016599
@@ -347,15 +347,19 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
         metrics = meta.pop("metrics", {})
         meta_with_metrics = dict(meta, **metrics)
 
-        meta_with_metrics.pop("categories", None)  # We don't want to document these, they can be too long
+        # We don't want to document these, they can be too long
+        for k in ["categories", "keypoint_names"]:
+            meta_with_metrics.pop(k, None)
 
         custom_docs = meta_with_metrics.pop("_docs", None)  # Custom per-Weights docs
         if custom_docs is not None:
             lines += [custom_docs, ""]
 
         for k, v in meta_with_metrics.items():
-            if k == "recipe":
+            if k in {"recipe", "license"}:
                 v = f"`link <{v}>`__"
+            elif k == "min_size":
+                v = f"height={v[0]}, width={v[1]}"
             table.append((str(k), str(v)))
         table = tabulate(table, tablefmt="rst")
         lines += [".. rst-class:: table-weights"]  # Custom CSS class, see custom_torchvision.css
......
@@ -11,8 +11,7 @@ Search for Mobile <https://arxiv.org/pdf/1807.11626.pdf>`__ paper.
 Model builders
 --------------
 
-The following model builders can be used to instanciate an MNASNet model. Currently
-only ``mnasnet0_5`` and ``mnasnet1_0`` can be instantiated with pre-trained weights.
+The following model builders can be used to instantiate an MNASNet model.
 All the model builders internally rely on the
 ``torchvision.models.mnasnet.MNASNet`` base class. Please refer to the `source
 code
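For readers of this page, a minimal usage sketch of one of the documented builders; the weights enum name is assumed from the torchvision API:

    import torch
    from torchvision.models import mnasnet0_5, MNASNet0_5_Weights

    # Instantiate with pre-trained ImageNet weights and run a dummy forward pass.
    model = mnasnet0_5(weights=MNASNet0_5_Weights.IMAGENET1K_V1).eval()
    with torch.no_grad():
        out = model(torch.rand(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 1000])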
......
@@ -74,8 +74,6 @@ class AlexNet_Weights(WeightsEnum):
 def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
     """AlexNet model architecture from `One weird trick for parallelizing convolutional neural networks <https://arxiv.org/abs/1404.5997>`__.
 
-    The required minimum input size of the model is 63x63.
-
     .. note::
         AlexNet was originally introduced in the `ImageNet Classification with
         Deep Convolutional Neural Networks
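With the hard-coded size note gone, input handling is best delegated to the transforms bundled with the weights. A hedged sketch, assuming the preset API of current torchvision:

    import torch
    from torchvision.models import alexnet, AlexNet_Weights

    weights = AlexNet_Weights.IMAGENET1K_V1
    model = alexnet(weights=weights).eval()
    # weights.transforms() returns the preprocessing matching these weights
    # (resize/crop/normalize), so callers need no hard-coded minimum size.
    batch = weights.transforms()(torch.rand(3, 256, 256)).unsqueeze(0)
    with torch.no_grad():
        scores = model(batch).softmax(dim=1)
    print(weights.meta["categories"][scores.argmax().item()])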
......
@@ -332,7 +332,6 @@ class DenseNet201_Weights(WeightsEnum):
 def densenet121(*, weights: Optional[DenseNet121_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-121 model from
     `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet121_Weights`, optional): The
@@ -358,7 +357,6 @@ def densenet121(*, weights: Optional[DenseNet121_Weights] = None, progress: bool
 def densenet161(*, weights: Optional[DenseNet161_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-161 model from
     `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet161_Weights`, optional): The
@@ -384,7 +382,6 @@ def densenet161(*, weights: Optional[DenseNet161_Weights] = None, progress: bool
 def densenet169(*, weights: Optional[DenseNet169_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-169 model from
     `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet169_Weights`, optional): The
@@ -410,7 +407,6 @@ def densenet169(*, weights: Optional[DenseNet169_Weights] = None, progress: bool
 def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-201 model from
     `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet201_Weights`, optional): The
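All four builders share the same base class and output shape, which a quick smoke test can confirm (random init, so nothing is downloaded):

    import torch
    from torchvision.models import densenet121, densenet161, densenet169, densenet201

    for builder in (densenet121, densenet161, densenet169, densenet201):
        model = builder().eval()  # weights=None: random initialization
        with torch.no_grad():
            out = model(torch.rand(1, 3, 224, 224))
        print(builder.__name__, out.shape)  # each prints torch.Size([1, 1000])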
......
@@ -298,8 +298,6 @@ def googlenet(*, weights: Optional[GoogLeNet_Weights] = None, progress: bool = T
     """GoogLeNet (Inception v1) model architecture from
     `Going Deeper with Convolutions <http://arxiv.org/abs/1409.4842>`_.
 
-    The required minimum input size of the model is 15x15.
-
     Args:
         weights (:class:`~torchvision.models.GoogLeNet_Weights`, optional): The
             pretrained weights for the model. See
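The deleted sentence isn't lost information: the minimum input size lives in the weights metadata and is rendered (as height/width) by the conf.py change above. A quick check, assuming min_size is present in the meta dict:

    from torchvision.models import GoogLeNet_Weights

    # Expected output: (15, 15), matching the removed docstring line.
    print(GoogLeNet_Weights.IMAGENET1K_V1.meta.get("min_size"))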
......
@@ -430,7 +430,6 @@ def inception_v3(*, weights: Optional[Inception_V3_Weights] = None, progress: bo
     """
     Inception v3 model architecture from
     `Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`_.
-    The required minimum input size of the model is 75x75.
 
     .. note::
         **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
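Unlike min_size, the 299x299 note stays in the docstring because it is a model-specific quirk worth calling out. A minimal sketch of a correctly sized forward pass:

    import torch
    from torchvision.models import inception_v3

    model = inception_v3(weights=None).eval()  # random init; nothing downloaded
    with torch.no_grad():
        out = model(torch.rand(1, 3, 299, 299))  # inception_v3 expects 299x299 inputs
    print(out.shape)  # torch.Size([1, 1000])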
......
@@ -147,8 +147,6 @@ def googlenet(
     weights. Quantized models only support inference and run on CPUs.
     GPU inference is not yet supported
 
-    The required minimum input size of the model is 15x15.
-
     Args:
         weights (:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` or :class:`~torchvision.models.GoogLeNet_Weights`, optional): The
             pretrained weights for the model. See
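A hedged sketch of CPU-only quantized inference as the docstring describes; the enum value and quantize flag are assumed from the torchvision quantization API:

    import torch
    from torchvision.models.quantization import googlenet, GoogLeNet_QuantizedWeights

    # quantize=True loads an int8 model; per the docstring, inference only, CPU only.
    model = googlenet(weights=GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1, quantize=True)
    model.eval()
    with torch.no_grad():
        out = model(torch.rand(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 1000])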
......
@@ -831,8 +831,8 @@ def wide_resnet101_2(
     The model is the same as ResNet except for the bottleneck number of channels
     which is twice larger in every block. The number of channels in outer 1x1
-    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
-    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
+    convolutions is the same, e.g. last block in ResNet-101 has 2048-512-2048
+    channels, and in Wide ResNet-101-2 has 2048-1024-2048.
 
     Args:
         weights (:class:`~torchvision.models.Wide_ResNet101_2_Weights`, optional): The
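The corrected numbers are easy to verify directly from the module structure (random init, nothing downloaded):

    from torchvision.models import resnet101, wide_resnet101_2

    # Last bottleneck block of layer4: the outer 1x1 convs match (2048 channels),
    # while the inner 3x3 conv is twice as wide in the wide variant (512 vs. 1024).
    narrow = resnet101().layer4[-1]
    wide = wide_resnet101_2().layer4[-1]
    print(narrow.conv2.in_channels, narrow.conv2.out_channels)  # 512 512
    print(wide.conv2.in_channels, wide.conv2.out_channels)      # 1024 1024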
......
@@ -162,8 +162,6 @@ def squeezenet1_0(
     accuracy with 50x fewer parameters and <0.5MB model size
     <https://arxiv.org/abs/1602.07360>`_ paper.
 
-    The required minimum input size of the model is 21x21.
-
     Args:
         weights (:class:`~torchvision.models.SqueezeNet1_0_Weights`, optional): The
             pretrained weights to use. See
@@ -193,7 +191,6 @@ def squeezenet1_1(
     SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
     than SqueezeNet 1.0, without sacrificing accuracy.
-    The required minimum input size of the model is 17x17.
 
     Args:
         weights (:class:`~torchvision.models.SqueezeNet1_1_Weights`, optional): The
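The parameter claim in the retained text can be sanity-checked in a couple of lines (random init, nothing downloaded):

    from torchvision.models import squeezenet1_0, squeezenet1_1

    # SqueezeNet 1.1 has slightly fewer parameters than 1.0, as the docstring says.
    for builder in (squeezenet1_0, squeezenet1_1):
        n_params = sum(p.numel() for p in builder().parameters())
        print(builder.__name__, f"{n_params:,} parameters")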
......