from typing import Any, Dict, Union

import torch

from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform

from .utils import is_simple_tensor

class ConvertBoundingBoxFormat(Transform):
    """[BETA] Convert bounding box coordinates to the given ``format``, e.g. from "CXCYWH" to "XYXY".

    .. betastatus:: ConvertBoundingBoxFormat transform

    Args:
        format (str or datapoints.BoundingBoxFormat): output bounding box format.
            Possible values are defined by :class:`~torchvision.datapoints.BoundingBoxFormat` and
            string values match the enums, e.g. "XYXY" or "XYWH" etc.
    """

    # Only bounding boxes are transformed; every other input type passes through untouched.
    _transformed_types = (datapoints.BoundingBox,)

    def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
        super().__init__()
        if isinstance(format, str):
            # Accept the enum member name as a plain string, e.g. "XYXY" -> BoundingBoxFormat.XYXY.
            format = datapoints.BoundingBoxFormat[format]
        self.format = format

    def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
        # Delegate the coordinate conversion to the functional API.
        return F.convert_format_bounding_box(inpt, new_format=self.format)  # type: ignore[return-value]
class ConvertDtype(Transform):
    """[BETA] Convert input image or video to the given ``dtype`` and scale the values accordingly.

    .. betastatus:: ConvertDtype transform

    This function does not support PIL Image.

    Args:
        dtype (torch.dtype): Desired data type of the output

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """

    # v1 counterpart used for compatibility/scripting fallbacks.
    _v1_transform_cls = _transforms.ConvertImageDtype

    # Applies to plain tensors (via the is_simple_tensor predicate) as well as Image and Video datapoints.
    _transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)

    def __init__(self, dtype: torch.dtype = torch.float32) -> None:
        super().__init__()
        self.dtype = dtype

    def _transform(
        self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
    ) -> Union[datapoints._TensorImageType, datapoints._TensorVideoType]:
        # Delegate dtype conversion (and value rescaling) to the functional API.
        return F.convert_dtype(inpt, self.dtype)


# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBox(Transform):
    """[BETA] Clamp bounding boxes to their corresponding image dimensions.

    The clamping is done according to the bounding boxes' ``spatial_size`` meta-data.

    .. betastatus:: ClampBoundingBox transform

    """

    # Only bounding boxes are transformed; every other input type passes through untouched.
    _transformed_types = (datapoints.BoundingBox,)

    def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
        # Delegate clamping to the functional API; spatial_size comes from the datapoint's meta-data.
        return F.clamp_bounding_box(inpt)  # type: ignore[return-value]