from typing import Any, Dict, Union
Philip Meier's avatar
Philip Meier committed
2

3
import torch
4

5
6
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
7

8
9
from .utils import is_simple_tensor

10
11

class ConvertBoundingBoxFormat(Transform):
    """[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".

    .. betastatus:: ConvertBoundingBoxFormat transform

    Args:
        format (str or datapoints.BoundingBoxFormat): output bounding box format.
            Possible values are defined by :class:`~torchvision.datapoints.BoundingBoxFormat` and
            string values match the enums, e.g. "XYXY" or "XYWH" etc.
    """

    # Only bounding-box datapoints are touched; everything else passes through untouched.
    _transformed_types = (datapoints.BoundingBox,)

    def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
        super().__init__()
        # Accept the enum member name as a plain string for convenience.
        self.format = datapoints.BoundingBoxFormat[format] if isinstance(format, str) else format

    def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
        # The functional op reads the input's current format from its meta-data,
        # so only the target format needs to be supplied here.
        converted = F.convert_format_bounding_box(inpt, new_format=self.format)
        return converted  # type: ignore[return-value]


class ConvertDtype(Transform):
    """[BETA] Convert input image or video to the given ``dtype`` and scale the values accordingly.

    .. betastatus:: ConvertDtype transform

    This function does not support PIL Image.

    Args:
        dtype (torch.dtype): Desired data type of the output

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """

    # v1 counterpart used when this transform is scripted/serialized through the v1 API.
    _v1_transform_cls = _transforms.ConvertImageDtype

    # Applies to plain tensors as well as Image/Video datapoints.
    _transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)

    def __init__(self, dtype: torch.dtype = torch.float32) -> None:
        super().__init__()
        self.dtype = dtype

    def _transform(
        self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
    ) -> Union[datapoints._TensorImageType, datapoints._TensorVideoType]:
        # Delegates dtype conversion (including value rescaling) to the functional op.
        return F.convert_dtype(inpt, self.dtype)


# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
# Note that `ConvertDtype._v1_transform_cls` already points at the v1 `ConvertImageDtype`.
ConvertImageDtype = ConvertDtype


class ClampBoundingBox(Transform):
    """[BETA] Clamp bounding boxes to their corresponding image dimensions.

    The clamping is done according to the bounding boxes' ``spatial_size`` meta-data.

    .. betastatus:: ClampBoundingBox transform

    """

    # Restrict this transform to bounding-box datapoints only.
    _transformed_types = (datapoints.BoundingBox,)

    def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
        # The functional op derives the clamping bounds from the box's own
        # ``spatial_size`` meta-data, so no extra parameters are passed.
        clamped = F.clamp_bounding_box(inpt)
        return clamped  # type: ignore[return-value]