import torch
import torch.nn as nn
import torch.nn.init as init
from .utils import load_state_dict_from_url

__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1']

model_urls = {
    'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
    'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}


class Fire(nn.Module):
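    # A 1x1 "squeeze" convolution first reduces the channel count; parallel
    # 1x1 and 3x3 "expand" convolutions then bring it back up, and their
    # outputs are concatenated along the channel dimension.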
    def __init__(self, inplanes, squeeze_planes,
                 expand1x1_planes, expand3x3_planes):
        super(Fire, self).__init__()
        self.inplanes = inplanes
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
        self.squeeze_activation = nn.ReLU(inplace=True)
        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
                                   kernel_size=1)
        self.expand1x1_activation = nn.ReLU(inplace=True)
        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
                                   kernel_size=3, padding=1)
        self.expand3x3_activation = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.squeeze_activation(self.squeeze(x))
        return torch.cat([
            self.expand1x1_activation(self.expand1x1(x)),
            self.expand3x3_activation(self.expand3x3(x))
        ], 1)
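
# A minimal shape sketch (illustrative only): Fire(96, 16, 64, 64) squeezes a
# 96-channel input down to 16 channels, runs the two expand branches on the
# result, and concatenates them, so the output has 64 + 64 = 128 channels at
# the same spatial resolution:
#
#     fire = Fire(96, 16, 64, 64)
#     y = fire(torch.randn(1, 96, 54, 54))  # -> torch.Size([1, 128, 54, 54])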


class SqueezeNet(nn.Module):
    def __init__(self, version='1_0', num_classes=1000):
        super(SqueezeNet, self).__init__()
        self.num_classes = num_classes
        if version == '1_0':
            self.features = nn.Sequential(
                nn.Conv2d(3, 96, kernel_size=7, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(96, 16, 64, 64),
                Fire(128, 16, 64, 64),
                Fire(128, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256, 32, 128, 128),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(512, 64, 256, 256),
            )
        elif version == '1_1':
            self.features = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(64, 16, 64, 64),
                Fire(128, 16, 64, 64),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(128, 32, 128, 128),
                Fire(256, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                Fire(512, 64, 256, 256),
            )
        else:
            # FIXME: Is this needed? SqueezeNet should only be called from the
            # FIXME: squeezenet1_x() functions
            # FIXME: This checking is not done for the other models
            raise ValueError("Unsupported SqueezeNet version {version}: "
                             "1_0 or 1_1 expected".format(version=version))

        # Final convolution is initialized differently from the rest
        final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1))
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal_(m.weight, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x)
        return x.view(x.size(0), self.num_classes)
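
# A usage sketch (illustrative only, assuming a standard 224x224 RGB input):
# the final 1x1 convolution emits one channel per class, AdaptiveAvgPool2d
# collapses the spatial dimensions, and forward() returns logits of shape
# (batch_size, num_classes):
#
#     net = SqueezeNet(version='1_0', num_classes=1000)
#     logits = net(torch.randn(2, 3, 224, 224))  # -> torch.Size([2, 1000])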


def _squeezenet(version, pretrained, progress, **kwargs):
    model = SqueezeNet(version, **kwargs)
    if pretrained:
        arch = 'squeezenet' + version
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
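
# Note on the helper above: the `version` string doubles as the suffix of the
# model_urls key (e.g. '1_0' maps to model_urls['squeezenet1_0']), and
# load_state_dict_from_url downloads and locally caches the checkpoint before
# its weights are loaded into the freshly constructed model.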


def squeezenet1_0(pretrained=False, progress=True, **kwargs):
    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
    accuracy with 50x fewer parameters and <0.5MB model size"
    <https://arxiv.org/abs/1602.07360>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
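
    Example (a minimal usage sketch; assumes the URL in ``model_urls`` is
    reachable when ``pretrained=True``)::

        >>> model = squeezenet1_0(pretrained=False).eval()
        >>> model(torch.randn(1, 3, 224, 224)).shape
        torch.Size([1, 1000])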
    """
    return _squeezenet('1_0', pretrained, progress, **kwargs)


def squeezenet1_1(pretrained=False, progress=True, **kwargs):
    r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
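
    Example (a minimal usage sketch; assumes the URL in ``model_urls`` is
    reachable when ``pretrained=True``)::

        >>> model = squeezenet1_1(pretrained=False).eval()
        >>> model(torch.randn(1, 3, 224, 224)).shape
        torch.Size([1, 1000])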
    """
    return _squeezenet('1_1', pretrained, progress, **kwargs)