from __future__ import annotations

from typing import Any, List, Optional, Tuple, Union

import torch
from torchvision.transforms.functional import InterpolationMode

from ._datapoint import _FillTypeJIT, Datapoint


class Video(Datapoint):
    """[BETA] :class:`torch.Tensor` subclass for videos.

    Args:
        data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
        dtype (torch.dtype, optional): Desired data type of the video. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the video. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the video. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
    """

    @classmethod
    def _wrap(cls, tensor: torch.Tensor) -> Video:
        video = tensor.as_subclass(cls)
        return video

    def __new__(
        cls,
        data: Any,
        *,
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[torch.device, str, int]] = None,
        requires_grad: Optional[bool] = None,
    ) -> Video:
        tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
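        # Videos need at least the four trailing (T, C, H, W) dimensions; the
        # check runs on the converted tensor so that list-like ``data`` (which
        # has no ``ndim``) is validated too.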
        if tensor.ndim < 4:
            raise ValueError(f"Expected at least a 4-dimensional tensor, got {tensor.ndim} dimensions")
        return cls._wrap(tensor)

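    # ``wrap_like`` mirrors the API of the other datapoint classes; ``Video``
    # carries no extra metadata, so it reduces to re-wrapping the tensor.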
    @classmethod
    def wrap_like(cls, other: Video, tensor: torch.Tensor) -> Video:
        return cls._wrap(tensor)

    def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
        return self._make_repr()

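    # The expected layout is (..., T, C, H, W): arbitrary leading batch
    # dimensions, then frames, channels, height, and width. The properties
    # below read their sizes off the trailing dimensions accordingly.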
    @property
    def spatial_size(self) -> Tuple[int, int]:
        return tuple(self.shape[-2:])  # type: ignore[return-value]

    @property
    def num_channels(self) -> int:
        return self.shape[-3]

    @property
    def num_frames(self) -> int:
        return self.shape[-4]

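    # Each transform method below unwraps to a plain tensor, dispatches to the
    # corresponding ``*_video`` kernel on ``self._F`` (the functional namespace
    # provided by ``Datapoint``), and re-wraps the result via ``wrap_like``.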
    def horizontal_flip(self) -> Video:
        output = self._F.horizontal_flip_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def vertical_flip(self) -> Video:
        output = self._F.vertical_flip_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

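    # ``antialias="warn"`` is a sentinel used during the beta period: it warns
    # that the default will change to ``True``; pass an explicit bool to opt
    # in or out silently.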
    def resize(  # type: ignore[override]
        self,
        size: List[int],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        max_size: Optional[int] = None,
        antialias: Optional[Union[str, bool]] = "warn",
    ) -> Video:
        output = self._F.resize_video(
            self.as_subclass(torch.Tensor),
            size,
            interpolation=interpolation,
            max_size=max_size,
            antialias=antialias,
        )
        return Video.wrap_like(self, output)

    def crop(self, top: int, left: int, height: int, width: int) -> Video:
        output = self._F.crop_video(self.as_subclass(torch.Tensor), top, left, height, width)
        return Video.wrap_like(self, output)

    def center_crop(self, output_size: List[int]) -> Video:
        output = self._F.center_crop_video(self.as_subclass(torch.Tensor), output_size=output_size)
        return Video.wrap_like(self, output)

    def resized_crop(
        self,
        top: int,
        left: int,
        height: int,
        width: int,
        size: List[int],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        antialias: Optional[Union[str, bool]] = "warn",
    ) -> Video:
        output = self._F.resized_crop_video(
            self.as_subclass(torch.Tensor),
            top,
            left,
            height,
            width,
            size=list(size),
            interpolation=interpolation,
            antialias=antialias,
        )
        return Video.wrap_like(self, output)

    def pad(
        self,
        padding: List[int],
        fill: Optional[Union[int, float, List[float]]] = None,
        padding_mode: str = "constant",
    ) -> Video:
        output = self._F.pad_video(self.as_subclass(torch.Tensor), padding, fill=fill, padding_mode=padding_mode)
        return Video.wrap_like(self, output)

    def rotate(
        self,
        angle: float,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        expand: bool = False,
        center: Optional[List[float]] = None,
        fill: _FillTypeJIT = None,
    ) -> Video:
        output = self._F.rotate_video(
            self.as_subclass(torch.Tensor), angle, interpolation=interpolation, expand=expand, fill=fill, center=center
        )
        return Video.wrap_like(self, output)

    def affine(
        self,
        angle: Union[int, float],
        translate: List[float],
        scale: float,
        shear: List[float],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        fill: _FillTypeJIT = None,
        center: Optional[List[float]] = None,
    ) -> Video:
        output = self._F.affine_video(
            self.as_subclass(torch.Tensor),
            angle,
            translate=translate,
            scale=scale,
            shear=shear,
            interpolation=interpolation,
            fill=fill,
            center=center,
        )
        return Video.wrap_like(self, output)

    def perspective(
        self,
        startpoints: Optional[List[List[int]]],
        endpoints: Optional[List[List[int]]],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        fill: _FillTypeJIT = None,
        coefficients: Optional[List[float]] = None,
    ) -> Video:
        output = self._F.perspective_video(
            self.as_subclass(torch.Tensor),
            startpoints,
            endpoints,
            interpolation=interpolation,
            fill=fill,
            coefficients=coefficients,
        )
        return Video.wrap_like(self, output)

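    # ``displacement`` is the dense per-pixel offset field consumed by the
    # elastic kernel (e.g. as produced by ``transforms.v2.ElasticTransform``).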
    def elastic(
        self,
        displacement: torch.Tensor,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        fill: _FillTypeJIT = None,
    ) -> Video:
        output = self._F.elastic_video(
            self.as_subclass(torch.Tensor), displacement, interpolation=interpolation, fill=fill
        )
        return Video.wrap_like(self, output)

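    # There is no dedicated ``*_video`` grayscale kernel; the image-tensor
    # kernel applies unchanged, since it only touches the channel dimension
    # and the leading frame dimension falls under the batch dims.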
    def rgb_to_grayscale(self, num_output_channels: int = 1) -> Video:
        output = self._F.rgb_to_grayscale_image_tensor(
            self.as_subclass(torch.Tensor), num_output_channels=num_output_channels
        )
        return Video.wrap_like(self, output)

    def adjust_brightness(self, brightness_factor: float) -> Video:
        output = self._F.adjust_brightness_video(self.as_subclass(torch.Tensor), brightness_factor=brightness_factor)
        return Video.wrap_like(self, output)

    def adjust_saturation(self, saturation_factor: float) -> Video:
        output = self._F.adjust_saturation_video(self.as_subclass(torch.Tensor), saturation_factor=saturation_factor)
        return Video.wrap_like(self, output)

    def adjust_contrast(self, contrast_factor: float) -> Video:
        output = self._F.adjust_contrast_video(self.as_subclass(torch.Tensor), contrast_factor=contrast_factor)
        return Video.wrap_like(self, output)

    def adjust_sharpness(self, sharpness_factor: float) -> Video:
        output = self._F.adjust_sharpness_video(self.as_subclass(torch.Tensor), sharpness_factor=sharpness_factor)
        return Video.wrap_like(self, output)

    def adjust_hue(self, hue_factor: float) -> Video:
        output = self._F.adjust_hue_video(self.as_subclass(torch.Tensor), hue_factor=hue_factor)
        return Video.wrap_like(self, output)

    def adjust_gamma(self, gamma: float, gain: float = 1) -> Video:
        output = self._F.adjust_gamma_video(self.as_subclass(torch.Tensor), gamma=gamma, gain=gain)
        return Video.wrap_like(self, output)

    def posterize(self, bits: int) -> Video:
        output = self._F.posterize_video(self.as_subclass(torch.Tensor), bits=bits)
        return Video.wrap_like(self, output)

    def solarize(self, threshold: float) -> Video:
        output = self._F.solarize_video(self.as_subclass(torch.Tensor), threshold=threshold)
        return Video.wrap_like(self, output)

    def autocontrast(self) -> Video:
        output = self._F.autocontrast_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def equalize(self) -> Video:
        output = self._F.equalize_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def invert(self) -> Video:
        output = self._F.invert_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Video:
        output = self._F.gaussian_blur_video(self.as_subclass(torch.Tensor), kernel_size=kernel_size, sigma=sigma)
        return Video.wrap_like(self, output)

    def normalize(self, mean: List[float], std: List[float], inplace: bool = False) -> Video:
        output = self._F.normalize_video(self.as_subclass(torch.Tensor), mean=mean, std=std, inplace=inplace)
        return Video.wrap_like(self, output)


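# Aliases used in type annotations elsewhere. TorchScript does not understand
# tensor subclasses, so the ``*JIT`` variants collapse to plain ``torch.Tensor``.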
_VideoType = Union[torch.Tensor, Video]
_VideoTypeJIT = torch.Tensor
_TensorVideoType = Union[torch.Tensor, Video]
_TensorVideoTypeJIT = torch.Tensor