_meta.py 10.9 KB
Newer Older
1
from typing import List, Optional, Tuple, Union
2

3
4
import PIL.Image
import torch
5
6
from torchvision import datapoints
from torchvision.datapoints import BoundingBoxFormat
7
from torchvision.transforms import _functional_pil as _FP
8

9
10
from torchvision.utils import _log_api_usage_once

11
12
from ._utils import is_simple_tensor

13

14
15
16
17
18
19
20
21
22
23
24
25
def get_dimensions_image_tensor(image: torch.Tensor) -> List[int]:
    """Return ``[channels, height, width]`` of an image tensor.

    A 2D (``H x W``) input is reported as having a single channel. Inputs
    with fewer than two dimensions raise a ``TypeError``.
    """
    trailing = list(image.shape[-3:])
    rank = len(trailing)
    if rank == 2:
        # H x W tensor: prepend the implicit single channel.
        return [1] + trailing
    if rank == 3:
        return trailing
    raise TypeError(f"Input tensor should have at least two dimensions, but got {rank}")


26
27
28
get_dimensions_image_pil = _FP.get_dimensions


Philip Meier's avatar
Philip Meier committed
29
30
31
32
def get_dimensions_video(video: torch.Tensor) -> List[int]:
    """Return ``[channels, height, width]`` of a video tensor's frames."""
    dims = get_dimensions_image_tensor(video)
    return dims


Philip Meier's avatar
Philip Meier committed
33
def get_dimensions(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> List[int]:
    """Dispatch to the ``get_dimensions`` kernel matching the input type.

    Plain tensors (and every input under torchscript) take the image-tensor
    fast path; datapoints and PIL images are dispatched via ``isinstance``.
    """
    if not torch.jit.is_scripting():
        _log_api_usage_once(get_dimensions)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return get_dimensions_image_tensor(inpt)

    # Check order matters: datapoint subclasses before the PIL fallback.
    if isinstance(inpt, datapoints.Image):
        return get_dimensions_image_tensor(inpt)
    if isinstance(inpt, datapoints.Video):
        return get_dimensions_video(inpt)
    if isinstance(inpt, PIL.Image.Image):
        return get_dimensions_image_pil(inpt)

    raise TypeError(
        f"Input can either be a plain tensor, an `Image` or `Video` datapoint, or a PIL image, "
        f"but got {type(inpt)} instead."
    )
52
53


54
55
56
57
58
59
60
61
62
63
64
def get_num_channels_image_tensor(image: torch.Tensor) -> int:
    """Return the number of channels of an image tensor (1 for 2D inputs)."""
    trailing = image.shape[-3:]
    rank = len(trailing)
    if rank == 2:
        # H x W tensor: implicitly single channel.
        return 1
    if rank == 3:
        return trailing[0]
    raise TypeError(f"Input tensor should have at least two dimensions, but got {rank}")


65
get_num_channels_image_pil = _FP.get_image_num_channels
66
67


68
69
70
71
def get_num_channels_video(video: torch.Tensor) -> int:
    """Return the number of channels of a video tensor's frames."""
    channels = get_num_channels_image_tensor(video)
    return channels


Philip Meier's avatar
Philip Meier committed
72
def get_num_channels(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> int:
    """Dispatch to the ``get_num_channels`` kernel matching the input type.

    Plain tensors (and every input under torchscript) take the image-tensor
    fast path; datapoints and PIL images are dispatched via ``isinstance``.
    """
    if not torch.jit.is_scripting():
        _log_api_usage_once(get_num_channels)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return get_num_channels_image_tensor(inpt)

    # Check order matters: datapoint subclasses before the PIL fallback.
    if isinstance(inpt, datapoints.Image):
        return get_num_channels_image_tensor(inpt)
    if isinstance(inpt, datapoints.Video):
        return get_num_channels_video(inpt)
    if isinstance(inpt, PIL.Image.Image):
        return get_num_channels_image_pil(inpt)

    raise TypeError(
        f"Input can either be a plain tensor, an `Image` or `Video` datapoint, or a PIL image, "
        f"but got {type(inpt)} instead."
    )
91
92


93
94
95
96
97
# The new name covers videos as well as images, so the old name is kept as a
# plain alias instead of going through a deprecation cycle.
get_image_num_channels = get_num_channels


Philip Meier's avatar
Philip Meier committed
98
def get_size_image_tensor(image: torch.Tensor) -> List[int]:
    """Return ``[height, width]`` of an image tensor.

    Raises a ``TypeError`` for inputs with fewer than two dimensions.
    """
    size = list(image.shape[-2:])
    if len(size) != 2:
        raise TypeError(f"Input tensor should have at least two dimensions, but got {len(size)}")
    return size
105
106
107


@torch.jit.unused
def get_size_image_pil(image: PIL.Image.Image) -> List[int]:
    """Return ``[height, width]`` of a PIL image.

    PIL reports ``(width, height)``; this swaps the order to match the
    tensor kernels.
    """
    size = _FP.get_image_size(image)  # (width, height)
    return [size[1], size[0]]


Philip Meier's avatar
Philip Meier committed
113
114
def get_size_video(video: torch.Tensor) -> List[int]:
    """Return ``[height, width]`` of a video tensor's frames."""
    size = get_size_image_tensor(video)
    return size
115
116


Philip Meier's avatar
Philip Meier committed
117
118
def get_size_mask(mask: torch.Tensor) -> List[int]:
    """Return ``[height, width]`` of a mask tensor."""
    size = get_size_image_tensor(mask)
    return size
119
120
121


@torch.jit.unused
def get_size_bounding_boxes(bounding_box: datapoints.BoundingBoxes) -> List[int]:
    """Return ``[height, width]`` of the canvas the boxes are defined on."""
    canvas_height, canvas_width = bounding_box.canvas_size
    return [canvas_height, canvas_width]
124
125


Philip Meier's avatar
Philip Meier committed
126
def get_size(inpt: datapoints._InputTypeJIT) -> List[int]:
    """Dispatch to the ``get_size`` kernel matching the input type.

    Plain tensors (and every input under torchscript) take the image-tensor
    fast path; datapoints and PIL images are dispatched via ``isinstance``.
    """
    if not torch.jit.is_scripting():
        _log_api_usage_once(get_size)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return get_size_image_tensor(inpt)

    # TODO: This is just the poor mans version of a dispatcher. This will be properly addressed with
    # https://github.com/pytorch/vision/pull/7747 when we can register the kernels above without the need to have
    # a method on the datapoint class
    if isinstance(inpt, datapoints.Image):
        return get_size_image_tensor(inpt)
    if isinstance(inpt, datapoints.BoundingBoxes):
        return get_size_bounding_boxes(inpt)
    if isinstance(inpt, datapoints.Mask):
        return get_size_mask(inpt)
    if isinstance(inpt, datapoints.Video):
        return get_size_video(inpt)
    if isinstance(inpt, PIL.Image.Image):
        return get_size_image_pil(inpt)

    raise TypeError(
        f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
        f"but got {type(inpt)} instead."
    )
150
151
152
153
154
155


def get_num_frames_video(video: torch.Tensor) -> int:
    """Return the number of frames, i.e. the size of the fourth-to-last dim."""
    num_frames = video.shape[-4]
    return num_frames


Philip Meier's avatar
Philip Meier committed
156
def get_num_frames(inpt: datapoints._VideoTypeJIT) -> int:
    """Dispatch to the ``get_num_frames`` kernel matching the input type."""
    if not torch.jit.is_scripting():
        _log_api_usage_once(get_num_frames)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        return get_num_frames_video(inpt)
    if isinstance(inpt, datapoints.Video):
        return get_num_frames_video(inpt)
    raise TypeError(f"Input can either be a plain tensor or a `Video` datapoint, but got {type(inpt)} instead.")
166
167


168
169
def _xywh_to_xyxy(xywh: torch.Tensor, inplace: bool) -> torch.Tensor:
    xyxy = xywh if inplace else xywh.clone()
170
171
172
173
    xyxy[..., 2:] += xyxy[..., :2]
    return xyxy


174
175
def _xyxy_to_xywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor:
    xywh = xyxy if inplace else xyxy.clone()
176
177
178
179
    xywh[..., 2:] -= xywh[..., :2]
    return xywh


180
181
182
def _cxcywh_to_xyxy(cxcywh: torch.Tensor, inplace: bool) -> torch.Tensor:
    if not inplace:
        cxcywh = cxcywh.clone()
183

184
185
186
187
188
189
190
    # Trick to do fast division by 2 and ceil, without casting. It produces the same result as
    # `torchvision.ops._box_convert._box_cxcywh_to_xyxy`.
    half_wh = cxcywh[..., 2:].div(-2, rounding_mode=None if cxcywh.is_floating_point() else "floor").abs_()
    # (cx - width / 2) = x1, same for y1
    cxcywh[..., :2].sub_(half_wh)
    # (x1 + width) = x2, same for y2
    cxcywh[..., 2:].add_(cxcywh[..., :2])
191

192
193
194
195
196
197
198
199
200
201
202
203
204
    return cxcywh


def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor:
    if not inplace:
        xyxy = xyxy.clone()

    # (x2 - x1) = width, same for height
    xyxy[..., 2:].sub_(xyxy[..., :2])
    # (x1 * 2 + width) / 2 = x1 + width / 2 = x1 + (x2-x1)/2 = (x1 + x2)/2 = cx, same for cy
    xyxy[..., :2].mul_(2).add_(xyxy[..., 2:]).div_(2, rounding_mode=None if xyxy.is_floating_point() else "floor")

    return xyxy
205
206


207
208
def _convert_format_bounding_boxes(
    bounding_boxes: torch.Tensor, old_format: BoundingBoxFormat, new_format: BoundingBoxFormat, inplace: bool = False
) -> torch.Tensor:
    """Convert plain-tensor boxes between formats, pivoting through XYXY."""
    if new_format == old_format:
        return bounding_boxes

    # TODO: Add _xywh_to_cxcywh and _cxcywh_to_xywh to improve performance
    boxes = bounding_boxes
    # Normalize the source format to XYXY first ...
    if old_format == BoundingBoxFormat.XYWH:
        boxes = _xywh_to_xyxy(boxes, inplace)
    elif old_format == BoundingBoxFormat.CXCYWH:
        boxes = _cxcywh_to_xyxy(boxes, inplace)

    # ... then convert XYXY to the requested target format.
    if new_format == BoundingBoxFormat.XYWH:
        boxes = _xyxy_to_xywh(boxes, inplace)
    elif new_format == BoundingBoxFormat.CXCYWH:
        boxes = _xyxy_to_cxcywh(boxes, inplace)

    return boxes
226
227


228
def convert_format_bounding_boxes(
    inpt: datapoints._InputTypeJIT,
    old_format: Optional[BoundingBoxFormat] = None,
    new_format: Optional[BoundingBoxFormat] = None,
    inplace: bool = False,
) -> datapoints._InputTypeJIT:
    """Convert bounding boxes to ``new_format``.

    This is a kernel / dispatcher hybrid: plain tensors must supply
    ``old_format`` explicitly, while ``datapoints.BoundingBoxes`` inputs carry
    their own format and must not. Giving ``old_format`` a default therefore
    forces a default on ``new_format`` as well to keep the signature valid
    Python, so we manually mimic the missing-argument error below.
    """
    if new_format is None:
        raise TypeError("convert_format_bounding_boxes() missing 1 required argument: 'new_format'")

    if not torch.jit.is_scripting():
        _log_api_usage_once(convert_format_bounding_boxes)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        if old_format is None:
            raise ValueError("For simple tensor inputs, `old_format` has to be passed.")
        return _convert_format_bounding_boxes(inpt, old_format=old_format, new_format=new_format, inplace=inplace)

    if isinstance(inpt, datapoints.BoundingBoxes):
        if old_format is not None:
            raise ValueError("For bounding box datapoint inputs, `old_format` must not be passed.")
        converted = _convert_format_bounding_boxes(
            inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=new_format, inplace=inplace
        )
        return datapoints.BoundingBoxes.wrap_like(inpt, converted, format=new_format)

    raise TypeError(
        f"Input can either be a plain tensor or a bounding box datapoint, but got {type(inpt)} instead."
    )


261
def _clamp_bounding_boxes(
    bounding_boxes: torch.Tensor, format: BoundingBoxFormat, canvas_size: Tuple[int, int]
) -> torch.Tensor:
    """Clamp box coordinates to the canvas, preserving format and dtype.

    ``canvas_size`` is ``(height, width)``.
    """
    # TODO: Investigate if it makes sense from a performance perspective to have an implementation for every
    #  BoundingBoxFormat instead of converting back and forth
    in_dtype = bounding_boxes.dtype
    # Work in float so integer boxes survive the format round-trip intact.
    working = bounding_boxes.clone() if bounding_boxes.is_floating_point() else bounding_boxes.float()
    xyxy = convert_format_bounding_boxes(
        working, old_format=format, new_format=datapoints.BoundingBoxFormat.XYXY, inplace=True
    )
    # x-coordinates are bounded by the width, y-coordinates by the height.
    xyxy[..., 0::2].clamp_(min=0, max=canvas_size[1])
    xyxy[..., 1::2].clamp_(min=0, max=canvas_size[0])
    clamped = convert_format_bounding_boxes(
        xyxy, old_format=BoundingBoxFormat.XYXY, new_format=format, inplace=True
    )
    return clamped.to(in_dtype)
277
278


279
def clamp_bounding_boxes(
    inpt: datapoints._InputTypeJIT,
    format: Optional[BoundingBoxFormat] = None,
    canvas_size: Optional[Tuple[int, int]] = None,
) -> datapoints._InputTypeJIT:
    """Clamp bounding boxes to their canvas.

    Plain tensors must supply both ``format`` and ``canvas_size``;
    ``datapoints.BoundingBoxes`` inputs carry their own metadata and must
    supply neither.
    """
    if not torch.jit.is_scripting():
        _log_api_usage_once(clamp_bounding_boxes)

    if torch.jit.is_scripting() or is_simple_tensor(inpt):
        if format is None or canvas_size is None:
            raise ValueError("For simple tensor inputs, `format` and `canvas_size` has to be passed.")
        return _clamp_bounding_boxes(inpt, format=format, canvas_size=canvas_size)

    if isinstance(inpt, datapoints.BoundingBoxes):
        if format is not None or canvas_size is not None:
            raise ValueError("For bounding box datapoint inputs, `format` and `canvas_size` must not be passed.")
        clamped = _clamp_bounding_boxes(inpt.as_subclass(torch.Tensor), format=inpt.format, canvas_size=inpt.canvas_size)
        return datapoints.BoundingBoxes.wrap_like(inpt, clamped)

    raise TypeError(
        f"Input can either be a plain tensor or a bounding box datapoint, but got {type(inpt)} instead."
    )