"""
helper class that supports empty tensors on some nn functions.

Ideally, add support directly in PyTorch to empty tensors in
those functions.

This can be removed once https://github.com/pytorch/pytorch/issues/12013
is implemented
"""

import warnings
from typing import Callable, List, Optional

import torch
from torch import Tensor


class Conv2d(torch.nn.Conv2d):
    """Deprecated drop-in shim; behaves exactly like :class:`torch.nn.Conv2d`.

    Construction emits a ``FutureWarning`` steering callers to the
    ``torch.nn`` class directly.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        message = (
            "torchvision.ops.misc.Conv2d is deprecated and will be "
            "removed in future versions, use torch.nn.Conv2d instead."
        )
        warnings.warn(message, FutureWarning)


class ConvTranspose2d(torch.nn.ConvTranspose2d):
    """Deprecated drop-in shim; behaves exactly like
    :class:`torch.nn.ConvTranspose2d`.

    Construction emits a ``FutureWarning`` steering callers to the
    ``torch.nn`` class directly.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        message = (
            "torchvision.ops.misc.ConvTranspose2d is deprecated and will be "
            "removed in future versions, use torch.nn.ConvTranspose2d instead."
        )
        warnings.warn(message, FutureWarning)


class BatchNorm2d(torch.nn.BatchNorm2d):
    """Deprecated drop-in shim; behaves exactly like
    :class:`torch.nn.BatchNorm2d`.

    Construction emits a ``FutureWarning`` steering callers to the
    ``torch.nn`` class directly.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        message = (
            "torchvision.ops.misc.BatchNorm2d is deprecated and will be "
            "removed in future versions, use torch.nn.BatchNorm2d instead."
        )
        warnings.warn(message, FutureWarning)


# Module-level alias of torch.nn.functional.interpolate, re-exported here so
# callers can import it from this module.
interpolate = torch.nn.functional.interpolate


# This is not in nn
class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters
    are fixed
    """

    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        n: Optional[int] = None,
    ):
        # n=None for backward-compatibility
        if n is not None:
            warnings.warn("`n` argument is deprecated and has been renamed `num_features`", DeprecationWarning)
            num_features = n
        super().__init__()
        self.eps = eps
        # All four terms are buffers, not parameters: they move with the
        # module (.to()/.cuda()) and serialize, but never receive gradients.
        for buffer_name, initial in (
            ("weight", torch.ones(num_features)),
            ("bias", torch.zeros(num_features)),
            ("running_mean", torch.zeros(num_features)),
            ("running_var", torch.ones(num_features)),
        ):
            self.register_buffer(buffer_name, initial)

    def _load_from_state_dict(
        self,
        state_dict: dict,
        prefix: str,
        local_metadata: dict,
        strict: bool,
        missing_keys: List[str],
        unexpected_keys: List[str],
        error_msgs: List[str],
    ):
        # Regular BatchNorm2d checkpoints carry a `num_batches_tracked`
        # entry that this frozen variant has no buffer for; drop it so
        # loading does not flag an unexpected key.
        tracked_key = prefix + "num_batches_tracked"
        if tracked_key in state_dict:
            del state_dict[tracked_key]

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x: Tensor) -> Tensor:
        # move reshapes to the beginning
        # to make it fuser-friendly
        broadcast_shape = (1, -1, 1, 1)
        weight = self.weight.reshape(broadcast_shape)
        bias_term = self.bias.reshape(broadcast_shape)
        var = self.running_var.reshape(broadcast_shape)
        mean = self.running_mean.reshape(broadcast_shape)
        # Folded affine form of batch norm: y = x * scale + shift.
        scale = weight * (var + self.eps).rsqrt()
        shift = bias_term - mean * scale
        return x * scale + shift

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})"


class ConvNormActivation(torch.nn.Sequential):
    """
    Sequential block of Conv2d -> optional normalization -> optional activation.

    Args:
        in_channels: channels of the input image.
        out_channels: channels produced by the convolution.
        kernel_size: convolution kernel size.
        stride: convolution stride.
        padding: explicit padding; when ``None`` it is derived from
            ``kernel_size`` and ``dilation`` to keep the spatial size
            at stride 1.
        groups: number of blocked connections from input to output channels.
        norm_layer: callable building the norm module from ``out_channels``;
            ``None`` skips normalization (and enables the conv bias).
        activation_layer: callable building the activation from ``inplace``;
            ``None`` skips the activation.
        dilation: convolution dilation.
        inplace: forwarded to ``activation_layer``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        padding: Optional[int] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: int = 1,
        inplace: bool = True,
    ) -> None:
        if padding is None:
            # "same"-style padding for the given kernel and dilation.
            padding = (kernel_size - 1) // 2 * dilation

        conv = torch.nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation=dilation,
            groups=groups,
            # A bias would be redundant when a norm layer follows.
            bias=norm_layer is None,
        )
        modules: List[torch.nn.Module] = [conv]
        if norm_layer is not None:
            modules.append(norm_layer(out_channels))
        if activation_layer is not None:
            modules.append(activation_layer(inplace=inplace))
        super().__init__(*modules)
        self.out_channels = out_channels


class SqueezeExcitation(torch.nn.Module):
    """
    Squeeze-and-Excitation block: each channel of the input is rescaled
    by a gate computed from its globally average-pooled activations.

    Args:
        input_channels: channels of the input tensor.
        squeeze_channels: channels of the intermediate (squeezed) layer.
        activation: callable building the nonlinearity after the squeeze.
        scale_activation: callable building the gate nonlinearity.
    """

    def __init__(
        self,
        input_channels: int,
        squeeze_channels: int,
        activation: Callable[..., torch.nn.Module] = torch.nn.ReLU,
        scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
    ) -> None:
        super().__init__()
        self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
        # 1x1 convs act as per-channel fully-connected layers.
        self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
        self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)
        self.activation = activation()
        self.scale_activation = scale_activation()

    def _scale(self, input: Tensor) -> Tensor:
        # squeeze -> reduce -> nonlinearity -> expand -> gate
        pooled = self.avgpool(input)
        squeezed = self.activation(self.fc1(pooled))
        return self.scale_activation(self.fc2(squeezed))

    def forward(self, input: Tensor) -> Tensor:
        return self._scale(input) * input