import warnings

import torch


# This module is kept only for backward compatibility; warn users at import
# time so they migrate to the supported public API.
warnings.warn(
    "The 'torchvision.transforms._functional_video' module is deprecated since 0.12 and will be removed in 0.14. "
    "Please use the 'torchvision.transforms.functional' module instead."
)


def _is_tensor_video_clip(clip):
    if not torch.is_tensor(clip):
Nikhil Kumar's avatar
Nikhil Kumar committed
14
        raise TypeError("clip should be Tensor. Got %s" % type(clip))
Zhicheng Yan's avatar
Zhicheng Yan committed
15
16
17
18
19
20
21
22
23
24
25
26

    if not clip.ndimension() == 4:
        raise ValueError("clip should be 4D. Got %dD" % clip.dim())

    return True


def crop(clip, i, j, h, w):
    """Crop a spatial region out of a video clip.

    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        i (int): row of the upper-left corner of the crop box.
        j (int): column of the upper-left corner of the crop box.
        h (int): height of the crop box.
        w (int): width of the crop box.
    Returns:
        torch.tensor: the cropped sub-clip, size (C, T, h, w).
    Raises:
        ValueError: if ``clip`` is not 4-dimensional.
    """
    if len(clip.size()) != 4:
        raise ValueError("clip should be a 4D tensor")
    rows = slice(i, i + h)
    cols = slice(j, j + w)
    return clip[..., rows, cols]


def resize(clip, target_size, interpolation_mode):
    """Spatially resize a video clip to ``target_size``.

    Args:
        clip (torch.tensor): Video clip, size (C, T, H, W).
        target_size (tuple): desired output (height, width).
        interpolation_mode (str): a mode accepted by
            ``torch.nn.functional.interpolate`` (e.g. "bilinear").
    Returns:
        torch.tensor: clip resized over its last two dimensions.
    Raises:
        ValueError: if ``target_size`` does not have exactly two entries.
    """
    if len(target_size) != 2:
        raise ValueError(f"target size should be tuple (height, width), instead got {target_size}")
    resized = torch.nn.functional.interpolate(
        clip, size=target_size, mode=interpolation_mode, align_corners=False
    )
    return resized


def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
    """
    Do spatial cropping and resizing to the video clip
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        i (int): i in (i,j) i.e coordinates of the upper left corner.
        j (int): j in (i,j) i.e coordinates of the upper left corner.
        h (int): Height of the cropped region.
        w (int): Width of the cropped region.
        size (tuple(int, int)): height and width of resized clip
        interpolation_mode (str): interpolation mode forwarded to ``resize``.
    Returns:
        clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    # Crop first, then rescale the cropped region to the requested size.
    cropped = crop(clip, i, j, h, w)
    return resize(cropped, size, interpolation_mode)


def center_crop(clip, crop_size):
    """Crop a region of size ``crop_size`` from the spatial center of the clip.

    Args:
        clip (torch.tensor): Video clip, size (C, T, H, W).
        crop_size (tuple): desired (height, width) of the crop.
    Returns:
        torch.tensor: centered crop, size (C, T, th, tw).
    Raises:
        ValueError: if the clip is not a 4D tensor or is spatially smaller
            than the requested crop.
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    h, w = clip.size(-2), clip.size(-1)
    th, tw = crop_size
    if h < th or w < tw:
        raise ValueError("height and width must be no smaller than crop_size")

    # Integer offsets that center the crop box within the frame.
    top = int(round((h - th) / 2.0))
    left = int(round((w - tw) / 2.0))
    return crop(clip, top, left, th, tw)


def to_tensor(clip):
    """
    Convert tensor data type from uint8 to float, divide value by 255.0 and
    permute the dimensions of clip tensor
    Args:
        clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
    Return:
        clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
    """
    _is_tensor_video_clip(clip)
    if clip.dtype != torch.uint8:
        raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype))
    # (T, H, W, C) -> (C, T, H, W), then rescale to the unit interval.
    permuted = clip.permute(3, 0, 1, 2)
    return permuted.float() / 255.0


def normalize(clip, mean, std, inplace=False):
    """
    Args:
        clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
        mean (tuple): pixel RGB mean. Size is (3)
        std (tuple): pixel standard deviation. Size is (3)
        inplace (bool): if True, mutate ``clip`` instead of cloning it first.
    Returns:
        normalized clip (torch.tensor): Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    # Work on a copy unless the caller explicitly asked for in-place mutation.
    out = clip if inplace else clip.clone()
    mean_t = torch.as_tensor(mean, dtype=out.dtype, device=out.device)
    std_t = torch.as_tensor(std, dtype=out.dtype, device=out.device)
    # Broadcast the per-channel (C,) statistics over the (C, T, H, W) layout.
    out.sub_(mean_t[:, None, None, None]).div_(std_t[:, None, None, None])
    return out


def hflip(clip):
    """
    Args:
        clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
    Returns:
        flipped clip (torch.tensor): Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    # Reverse the width (last) dimension.
    return torch.flip(clip, dims=[-1])