OpenDAS / vision · Commits

Commit a27522cb (unverified)
Authored Feb 24, 2023 by Nicolas Hug; committed by GitHub, Feb 24, 2023
Parent: 92d75e63

Change betastatus doc warning and v2 import warning (#7329)

Showing 13 changed files with 73 additions and 61 deletions (+73 -61).
docs/source/beta_status.py                      +13  -2
docs/source/transforms.rst                       +4  -4
torchvision/__init__.py                          +5  -4
torchvision/transforms/v2/_augment.py            +1  -1
torchvision/transforms/v2/_auto_augment.py       +4  -4
torchvision/transforms/v2/_color.py             +10 -10
torchvision/transforms/v2/_container.py          +4  -4
torchvision/transforms/v2/_deprecated.py         +1  -1
torchvision/transforms/v2/_geometry.py          +18 -18
torchvision/transforms/v2/_meta.py               +3  -3
torchvision/transforms/v2/_misc.py               +6  -6
torchvision/transforms/v2/_temporal.py           +1  -1
torchvision/transforms/v2/_type_conversion.py    +3  -3
docs/source/beta_status.py  (+13 -2)

@@ -4,15 +4,26 @@ from docutils.parsers.rst import Directive

 class BetaStatus(Directive):
     has_content = True
+    text = "The {api_name} is in Beta stage, and backward compatibility is not guaranteed."

     def run(self):
-        api_name = " ".join(self.content)
-        text = f"The {api_name} is in Beta stage, and backward compatibility is not guaranteed."
+        text = self.text.format(api_name=" ".join(self.content))
         return [nodes.warning("", nodes.paragraph("", "", nodes.Text(text)))]


+class V2BetaStatus(BetaStatus):
+    text = (
+        "The {api_name} is in Beta stage, and while we do not expect major breaking changes, "
+        "some APIs may still change according to user feedback. Please submit any feedback you may have "
+        "in this issue: https://github.com/pytorch/vision/issues/6753, and you can also check "
+        "out https://github.com/pytorch/vision/issues/7319 to learn "
+        "more about the APIs that we suspect might involve future changes."
+    )
+
+
 def setup(app):
     app.add_directive("betastatus", BetaStatus)
+    app.add_directive("v2betastatus", V2BetaStatus)
     return {
         "version": "0.1",
         "parallel_read_safe": True,
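The change above turns the warning text into a class attribute, so the v2 variant only overrides the template. A standalone sketch of that mechanism (not code from the commit: the docutils/Sphinx plumbing is stubbed out, the v2 message is abridged, and the class names carry a Sketch suffix to mark them as illustrative):

    class BetaStatusSketch:
        # Template string copied from the diff; subclasses override only `text`.
        text = "The {api_name} is in Beta stage, and backward compatibility is not guaranteed."

        def __init__(self, content):
            # In Sphinx, `self.content` holds the directive's content lines.
            self.content = content

        def run(self):
            # Same formatting step as the real run(), without wrapping the result
            # in nodes.warning(...) / nodes.paragraph(...).
            return self.text.format(api_name=" ".join(self.content))


    class V2BetaStatusSketch(BetaStatusSketch):
        # Abridged version of the V2BetaStatus message added above.
        text = (
            "The {api_name} is in Beta stage, and while we do not expect major breaking "
            "changes, some APIs may still change according to user feedback."
        )


    print(BetaStatusSketch(["RandomErasing", "transform"]).run())
    print(V2BetaStatusSketch(["RandomErasing", "transform"]).run())

In the docs themselves, a docstring line such as ".. v2betastatus:: RandomErasing transform" (as in the hunks below) is what feeds the directive's content, and the rendered page shows the formatted warning box.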
docs/source/transforms.rst  (+4 -4)

@@ -16,10 +16,10 @@ Transforming and augmenting images

 :ref:`sphx_glr_auto_examples_plot_transforms_v2_e2e.py`.

 Note that these transforms are still BETA, and while we don't expect major
 breaking changes in the future, some APIs may still change according to user
-feedback. Please submit any feedback you may have in
-https://github.com/pytorch/vision/issues/6753, and you can also check out
-https://github.com/pytorch/vision/issues/7319 to learn more about the APIs
-that we suspect might involve future changes.
+feedback. Please submit any feedback you may have `here
+<https://github.com/pytorch/vision/issues/6753>`_, and you can also check
+out `this issue <https://github.com/pytorch/vision/issues/7319>`_ to learn
+more about the APIs that we suspect might involve future changes.

 Transforms are common image transformations available in the
 ``torchvision.transforms`` module. They can be chained together using
torchvision/__init__.py  (+5 -4)

@@ -100,10 +100,11 @@ def _is_tracing():

 _WARN_ABOUT_BETA_TRANSFORMS = True
 _BETA_TRANSFORMS_WARNING = (
     "The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. "
-    "While we will try our best to maintain backward compatibility, "
-    "some APIs or behaviors might change without a deprecation cycle. "
-    "To help us improve these new features, please provide your feedback "
-    "here: https://github.com/pytorch/vision/issues/6753."
+    "While we do not expect major breaking changes, some APIs may still change "
+    "according to user feedback. Please submit any feedback you may have in "
+    "this issue: https://github.com/pytorch/vision/issues/6753, and you can also "
+    "check out https://github.com/pytorch/vision/issues/7319 to learn more about "
+    "the APIs that we suspect might involve future changes. "
     "You can silence this warning by calling torchvision.disable_beta_transform_warning()."
 )
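Only the string changes in this hunk; the code that actually emits or silences the warning lies outside it. A minimal sketch of the module-level pattern the two names suggest (hypothetical: the helper functions are illustrative, and the opt-out name is spelled exactly as it appears in the message text above):

    import warnings

    # Names taken from the hunk above; the message is shortened here.
    _WARN_ABOUT_BETA_TRANSFORMS = True
    _BETA_TRANSFORMS_WARNING = (
        "The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. ..."
    )


    def disable_beta_transform_warning():
        # Hypothetical opt-out mirroring the instruction in the warning text:
        # flip the module-level switch so the Beta warning is no longer emitted.
        global _WARN_ABOUT_BETA_TRANSFORMS
        _WARN_ABOUT_BETA_TRANSFORMS = False


    def _maybe_warn_about_beta_transforms():
        # Hypothetical emission point, e.g. run when the v2 namespace is imported.
        if _WARN_ABOUT_BETA_TRANSFORMS:
            warnings.warn(_BETA_TRANSFORMS_WARNING, UserWarning)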
torchvision/transforms/v2/_augment.py  (+1 -1)

@@ -15,7 +15,7 @@ from .utils import is_simple_tensor, query_chw

 class RandomErasing(_RandomApplyTransform):
     """[BETA] Randomly select a rectangle region in the input image or video and erase its pixels.

-    .. betastatus:: RandomErasing transform
+    .. v2betastatus:: RandomErasing transform

     This transform does not support PIL Image.
     'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896

The remaining files apply the same one-line ".. betastatus::" to ".. v2betastatus::" substitution in their transform docstrings.
torchvision/transforms/v2/_auto_augment.py  (+4 -4)
Same substitution in four hunks (lines 165, 342, 425, 496), covering the docstrings of AutoAugment, RandAugment, TrivialAugmentWide, and AugMix.
torchvision/transforms/v2/_color.py  (+10 -10)
Same substitution in ten hunks (lines 13, 42, 76, 182, 282, 301, 321, 346, 370, 389), covering the docstrings of Grayscale, RandomGrayscale, ColorJitter, RandomPhotometricDistort, RandomEqualize, RandomInvert, RandomPosterize, RandomSolarize, RandomAutocontrast, and RandomAdjustSharpness.
torchvision/transforms/v2/_container.py  (+4 -4)
Same substitution in four hunks (lines 10, 61, 116, 155), covering the docstrings of Compose, RandomApply, RandomChoice, and RandomOrder.
torchvision/transforms/v2/_deprecated.py  (+1 -1)
Same substitution in one hunk (line 12), covering the docstring of the deprecated ToTensor transform.
torchvision/transforms/v2/_geometry.py  (+18 -18)
Same substitution in 18 hunks (lines 28, 48, 68, 162, 190, 316, 379, 437, 512, 581, 654, 775, 930, 1016, 1108, 1232, 1298, 1366), covering the docstrings of RandomHorizontalFlip, RandomVerticalFlip, Resize, CenterCrop, RandomResizedCrop, FiveCrop, TenCrop, Pad, RandomZoomOut, RandomRotation, RandomAffine, RandomCrop, RandomPerspective, ElasticTransform, RandomIoUCrop, ScaleJitter, RandomShortestSize, and RandomResize. The ElasticTransform docstring keeps its pre-existing directive label, ".. v2betastatus:: RandomPerspective transform".
torchvision/transforms/v2/_meta.py  (+3 -3)
Same substitution in three hunks (lines 11, 34, 77), covering the docstrings of ConvertBoundingBoxFormat, ConvertDtype, and ClampBoundingBox.
torchvision/transforms/v2/_misc.py  (+6 -6)
Same substitution in six hunks (lines 24, 55, 138, 178, 225, 258), covering the docstrings of Lambda, LinearTransformation, Normalize, GaussianBlur, ToDtype, and SanitizeBoundingBox. The GaussianBlur directive keeps its pre-existing "GausssianBlur" spelling.
torchvision/transforms/v2/_temporal.py  (+1 -1)
Same substitution in one hunk (line 9), covering the docstring of UniformTemporalSubsample.
torchvision/transforms/v2/_type_conversion.py  (+3 -3)
Same substitution in three hunks (lines 13, 30, 46), covering the docstrings of PILToTensor, ToImageTensor, and ToImagePIL.
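Since every file above applies the same one-line docstring substitution, one quick way to confirm nothing was missed is to scan the v2 package for leftover occurrences of the old marker. A small check script (not part of the commit; it only assumes the repository layout shown in the file list above):

    # Scan torchvision/transforms/v2 for docstrings that still use the old directive.
    import pathlib

    V2_DIR = pathlib.Path("torchvision/transforms/v2")

    leftovers = [
        (path, lineno)
        for path in sorted(V2_DIR.glob("*.py"))
        for lineno, line in enumerate(path.read_text().splitlines(), start=1)
        if ".. betastatus::" in line
    ]

    for path, lineno in leftovers:
        print(f"{path}:{lineno}: still uses .. betastatus::")
    print(f"{len(leftovers)} leftover occurrence(s)")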