"""
This file is part of the private API. Please do not use directly these classes as they will be modified on
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import Optional, Tuple
import torch
from torch import Tensor, nn

from . import functional as F, InterpolationMode
# Public names of this module; everything else is an implementation detail.
__all__ = [
    "ObjectDetection",
    "ImageClassification",
    "VideoClassification",
    "SemanticSegmentation",
    "OpticalFlow",
]


class ObjectDetection(nn.Module):
    """Inference preset for detection weights: the input becomes a float tensor
    with values rescaled to ``[0.0, 1.0]``."""

    def forward(self, img: Tensor) -> Tensor:
        # PIL images are converted to tensors first; tensor inputs pass through.
        tensor = img if isinstance(img, Tensor) else F.pil_to_tensor(img)
        return F.convert_image_dtype(tensor, torch.float)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}()"

    def describe(self) -> str:
        return "The images are rescaled to ``[0.0, 1.0]``."

34

35
class ImageClassification(nn.Module):
    """Inference preset for classification weights: resize, center-crop, rescale
    to ``[0.0, 1.0]`` and normalize with the configured mean/std."""

    def __init__(
        self,
        *,
        crop_size: int,
        resize_size: int = 256,
        mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
        std: Tuple[float, ...] = (0.229, 0.224, 0.225),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    ) -> None:
        super().__init__()
        # Sizes are stored as single-element lists because the functional ops
        # expect sequences.
        self.crop_size = [crop_size]
        self.resize_size = [resize_size]
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation

    def forward(self, img: Tensor) -> Tensor:
        out = F.resize(img, self.resize_size, interpolation=self.interpolation)
        out = F.center_crop(out, self.crop_size)
        # A PIL input survives resize/center_crop as PIL; convert it here.
        if not isinstance(out, Tensor):
            out = F.pil_to_tensor(out)
        out = F.convert_image_dtype(out, torch.float)
        return F.normalize(out, mean=self.mean, std=self.std)

    def __repr__(self) -> str:
        attrs = [
            f"crop_size={self.crop_size}",
            f"resize_size={self.resize_size}",
            f"mean={self.mean}",
            f"std={self.std}",
            f"interpolation={self.interpolation}",
        ]
        inner = "".join(f"\n    {attr}" for attr in attrs)
        return f"{self.__class__.__name__}({inner}\n)"

    def describe(self) -> str:
        return (
            f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, "
            f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to "
            f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``."
        )

78

79
class VideoClassification(nn.Module):
    """Inference preset for video-classification weights: per-frame resize and
    center-crop, rescale to ``[0.0, 1.0]``, normalize, and reorder to
    channels-first ``(N, C, T, H, W)``."""

    def __init__(
        self,
        *,
        crop_size: Tuple[int, int],
        resize_size: Tuple[int, int],
        mean: Tuple[float, ...] = (0.43216, 0.394666, 0.37645),
        std: Tuple[float, ...] = (0.22803, 0.22145, 0.216989),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    ) -> None:
        super().__init__()
        # Stored as lists because the functional ops expect sequences.
        self.crop_size = list(crop_size)
        self.resize_size = list(resize_size)
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation

    def forward(self, vid: Tensor) -> Tensor:
        # Promote an unbatched (T, H, W, C) clip to a batch of one; the extra
        # dimension is stripped again before returning.
        added_batch = vid.ndim < 5
        if added_batch:
            vid = vid.unsqueeze(dim=0)

        vid = vid.permute(0, 1, 4, 2, 3)  # (N, T, H, W, C) => (N, T, C, H, W)
        batch, frames, channels, height, width = vid.shape
        # Flatten batch and time so the image ops act on every frame at once.
        vid = vid.view(-1, channels, height, width)
        vid = F.resize(vid, self.resize_size, interpolation=self.interpolation)
        vid = F.center_crop(vid, self.crop_size)
        vid = F.convert_image_dtype(vid, torch.float)
        vid = F.normalize(vid, mean=self.mean, std=self.std)
        out_h, out_w = self.crop_size
        vid = vid.view(batch, frames, channels, out_h, out_w)
        vid = vid.permute(0, 2, 1, 3, 4)  # (N, T, C, H, W) => (N, C, T, H, W)

        if added_batch:
            vid = vid.squeeze(dim=0)
        return vid

    def __repr__(self) -> str:
        attrs = [
            f"crop_size={self.crop_size}",
            f"resize_size={self.resize_size}",
            f"mean={self.mean}",
            f"std={self.std}",
            f"interpolation={self.interpolation}",
        ]
        inner = "".join(f"\n    {attr}" for attr in attrs)
        return f"{self.__class__.__name__}({inner}\n)"

    def describe(self) -> str:
        return (
            f"The video frames are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, "
            f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to "
            f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``."
        )

134

135
class SemanticSegmentation(nn.Module):
    """Inference preset for segmentation weights: optional resize, then rescale
    to ``[0.0, 1.0]`` and normalize with the configured mean/std."""

    def __init__(
        self,
        *,
        resize_size: Optional[int],
        mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
        std: Tuple[float, ...] = (0.229, 0.224, 0.225),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    ) -> None:
        super().__init__()
        # ``None`` disables resizing; otherwise wrap in a list for the op.
        self.resize_size = [resize_size] if resize_size is not None else None
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation

    def forward(self, img: Tensor) -> Tensor:
        # Resizing only happens when a resize_size was configured.
        if isinstance(self.resize_size, list):
            img = F.resize(img, self.resize_size, interpolation=self.interpolation)
        if not isinstance(img, Tensor):
            img = F.pil_to_tensor(img)
        out = F.convert_image_dtype(img, torch.float)
        return F.normalize(out, mean=self.mean, std=self.std)

    def __repr__(self) -> str:
        attrs = [
            f"resize_size={self.resize_size}",
            f"mean={self.mean}",
            f"std={self.std}",
            f"interpolation={self.interpolation}",
        ]
        inner = "".join(f"\n    {attr}" for attr in attrs)
        return f"{self.__class__.__name__}({inner}\n)"

    def describe(self) -> str:
        return (
            f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
            f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
            f"``std={self.std}``."
        )

175

176
177
178
179
180
181
class OpticalFlow(nn.Module):
    """Inference preset for optical-flow weights: both images become contiguous
    float tensors rescaled to ``[-1.0, 1.0]``."""

    @staticmethod
    def _prepare(img: Tensor) -> Tensor:
        # Per-image pipeline: PIL -> tensor, float in [0, 1], then map [0, 1]
        # into [-1, 1].
        if not isinstance(img, Tensor):
            img = F.pil_to_tensor(img)
        img = F.convert_image_dtype(img, torch.float)
        img = F.normalize(img, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        return img.contiguous()

    def forward(self, img1: Tensor, img2: Tensor) -> Tuple[Tensor, Tensor]:
        return self._prepare(img1), self._prepare(img2)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}()"

    def describe(self) -> str:
        return "The images are rescaled to ``[-1.0, 1.0]``."