from typing import Any, Optional

import torch
import torch.nn as nn
from torch import Tensor
from torchvision.models import shufflenetv2

from ..._internally_replaced_utils import load_state_dict_from_url
from .utils import _replace_relu, quantize_model

__all__ = [
    "QuantizableShuffleNetV2",
    "shufflenet_v2_x0_5",
    "shufflenet_v2_x1_0",
]

quant_model_urls = {
    "shufflenetv2_x0.5_fbgemm": "https://download.pytorch.org/models/quantized/shufflenetv2_x0.5_fbgemm-00845098.pth",
    "shufflenetv2_x1.0_fbgemm": "https://download.pytorch.org/models/quantized/shufflenetv2_x1_fbgemm-db332c57.pth",
}


class QuantizableInvertedResidual(shufflenetv2.InvertedResidual):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.cat = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        if self.stride == 1:
            x1, x2 = x.chunk(2, dim=1)
            out = self.cat.cat([x1, self.branch2(x2)], dim=1)
        else:
            out = self.cat.cat([self.branch1(x), self.branch2(x)], dim=1)

        out = shufflenetv2.channel_shuffle(out, 2)

        return out


class QuantizableShuffleNetV2(shufflenetv2.ShuffleNetV2):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, inverted_residual=QuantizableInvertedResidual, **kwargs)  # type: ignore[misc]
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
        x = self.quant(x)
        x = self._forward_impl(x)
        x = self.dequant(x)
        return x

    def fuse_model(self) -> None:
        r"""Fuse conv/bn/relu modules in shufflenetv2 model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        """

        for name, m in self._modules.items():
            if name in ["conv1", "conv5"]:
                torch.ao.quantization.fuse_modules(m, [["0", "1", "2"]], inplace=True)
        for m in self.modules():
            if type(m) is QuantizableInvertedResidual:
                if len(m.branch1._modules.items()) > 0:
                    torch.ao.quantization.fuse_modules(m.branch1, [["0", "1"], ["2", "3", "4"]], inplace=True)
                torch.ao.quantization.fuse_modules(
                    m.branch2,
                    [["0", "1", "2"], ["3", "4"], ["5", "6", "7"]],
                    inplace=True,
                )
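

# Illustrative sketch (not part of the upstream torchvision file): how
# ``fuse_model()`` fits into the standard eager-mode post-training static
# quantization flow. The random tensors used for calibration below are a
# hypothetical stand-in for a real calibration data loader.
def _example_eager_mode_ptq(model: QuantizableShuffleNetV2) -> QuantizableShuffleNetV2:
    model.eval()
    # Fold conv/bn/relu groups so they are quantized as single units.
    model.fuse_model()
    # Select the x86 (fbgemm) backend config and insert observers.
    model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
    torch.ao.quantization.prepare(model, inplace=True)
    # Calibrate: run a few batches so the observers record activation ranges.
    with torch.no_grad():
        for _ in range(4):
            model(torch.randn(1, 3, 224, 224))
    # Replace observed modules with their quantized counterparts.
    torch.ao.quantization.convert(model, inplace=True)
    return model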


def _shufflenetv2(
    arch: str,
    pretrained: bool,
    progress: bool,
    quantize: bool,
    *args: Any,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:

    model = QuantizableShuffleNetV2(*args, **kwargs)
    _replace_relu(model)

    if quantize:
        # TODO use pretrained as a string to specify the backend
        backend = "fbgemm"
        quantize_model(model, backend)
    else:
        assert pretrained in [True, False]

    if pretrained:
        model_url: Optional[str] = None
        if quantize:
            model_url = quant_model_urls[arch + "_" + backend]
        else:
            model_url = shufflenetv2.model_urls[arch]

        state_dict = load_state_dict_from_url(model_url, progress=progress)

        model.load_state_dict(state_dict)
    return model


def shufflenet_v2_x0_5(
    pretrained: bool = False,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    """
    Constructs a ShuffleNetV2 with 0.5x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
    <https://arxiv.org/abs/1807.11164>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, return a quantized version of the model
    """
    return _shufflenetv2(
        "shufflenetv2_x0.5", pretrained, progress, quantize, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs
    )


def shufflenet_v2_x1_0(
    pretrained: bool = False,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    """
    Constructs a ShuffleNetV2 with 1.0x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
    <https://arxiv.org/abs/1807.11164>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, return a quantized version of the model
    """
    return _shufflenetv2(
        "shufflenetv2_x1.0", pretrained, progress, quantize, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs
    )
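

# Illustrative usage sketch (not part of the upstream torchvision file): build
# the quantized 1.0x model with pretrained fbgemm weights and run one forward
# pass on a random, image-sized tensor.
if __name__ == "__main__":
    qmodel = shufflenet_v2_x1_0(pretrained=True, quantize=True)
    qmodel.eval()
    with torch.no_grad():
        logits = qmodel(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 1000])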