import warnings
from typing import Any, Callable, List, Optional

import torch
from torch import Tensor
from torch import nn

from .._internally_replaced_utils import load_state_dict_from_url
from ..ops.misc import ConvNormActivation
from ..utils import _log_api_usage_once
from ._utils import _make_divisible


__all__ = ["MobileNetV2", "mobilenet_v2"]


model_urls = {
    "mobilenet_v2": "https://download.pytorch.org/models/mobilenet_v2-b0353104.pth",
}


# necessary for backwards compatibility
class _DeprecatedConvBNAct(ConvNormActivation):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The ConvBNReLU/ConvBNActivation classes are deprecated and will be removed in future versions. "
            "Use torchvision.ops.misc.ConvNormActivation instead.",
            FutureWarning,
        )
        if kwargs.get("norm_layer", None) is None:
            kwargs["norm_layer"] = nn.BatchNorm2d
        if kwargs.get("activation_layer", None) is None:
            kwargs["activation_layer"] = nn.ReLU6
        super().__init__(*args, **kwargs)


ConvBNReLU = _DeprecatedConvBNAct
ConvBNActivation = _DeprecatedConvBNAct


# Inverted residual block: pointwise expansion -> depthwise conv -> linear pointwise projection,
# with a residual connection when stride == 1 and input/output channels match.
class InvertedResidual(nn.Module):
    def __init__(
        self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        self.stride = stride
        assert stride in [1, 2]

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        layers: List[nn.Module] = []
        if expand_ratio != 1:
            # pw
            layers.append(
                ConvNormActivation(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6)
            )
        layers.extend(
            [
                # dw
                ConvNormActivation(
                    hidden_dim,
                    hidden_dim,
                    stride=stride,
                    groups=hidden_dim,
                    norm_layer=norm_layer,
                    activation_layer=nn.ReLU6,
                ),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                norm_layer(oup),
            ]
        )
        self.conv = nn.Sequential(*layers)
        self.out_channels = oup
        self._is_cn = stride > 1

    def forward(self, x: Tensor) -> Tensor:
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)


class MobileNetV2(nn.Module):
    def __init__(
        self,
        num_classes: int = 1000,
        width_mult: float = 1.0,
        inverted_residual_setting: Optional[List[List[int]]] = None,
        round_nearest: int = 8,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        dropout: float = 0.2,
    ) -> None:
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
                Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
            dropout (float): The dropout probability

        """
        super().__init__()
        _log_api_usage_once(self)

        if block is None:
            block = InvertedResidual

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        input_channel = 32
        last_channel = 1280

        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError(
                f"inverted_residual_setting should be non-empty and each entry should be a 4-element "
                f"list, got {inverted_residual_setting}"
            )

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features: List[nn.Module] = [
            ConvNormActivation(3, input_channel, stride=2, norm_layer=norm_layer, activation_layer=nn.ReLU6)
        ]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        # building last several layers
        features.append(
            ConvNormActivation(
                input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6
            )
        )
        # make it nn.Sequential
        self.features = nn.Sequential(*features)

        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Linear(self.last_channel, num_classes),
        )

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        x = self.features(x)
        # Cannot use "squeeze" as batch-size can be 1
        x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


def mobilenet_v2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MobileNetV2:
    """
    Constructs a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = MobileNetV2(**kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls["mobilenet_v2"], progress=progress)
        model.load_state_dict(state_dict)
    return model
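

# Usage sketch (illustrative only, not part of this module's API): construct the model through
# the public torchvision entry point and run a single forward pass on a dummy batch. This assumes
# the file is installed as part of torchvision so that `torchvision.models.mobilenet_v2` resolves
# to the constructor defined above.
#
#     import torch
#     from torchvision.models import mobilenet_v2
#
#     model = mobilenet_v2(pretrained=True)  # fetches ImageNet weights from model_urls["mobilenet_v2"]
#     model.eval()
#     with torch.no_grad():
#         logits = model(torch.randn(1, 3, 224, 224))
#     print(logits.shape)  # torch.Size([1, 1000]) with the default num_classes=1000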