# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F

__all__ = ["ResNet"]


class ConvBNLayer(nn.Layer):
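    """Conv2D followed by BatchNorm, with an optional fused activation.

    When ``is_vd_mode`` is True the layer applies the ResNet-D (vd) trick:
    a stride-``stride`` average pool downsamples the input first and the
    convolution itself then runs with stride 1, avoiding the information
    loss of a strided 1x1 convolution.
    """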
    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            groups=1,
            is_vd_mode=False,
            act=None,
            name=None, ):
        super(ConvBNLayer, self).__init__()

        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = nn.AvgPool2D(
            kernel_size=stride, stride=stride, padding=0, ceil_mode=True)
        self._conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=1 if is_vd_mode else stride,
            padding=(kernel_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = nn.BatchNorm(
            out_channels,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def forward(self, inputs):
        if self.is_vd_mode:
            inputs = self._pool2d_avg(inputs)
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y


class BottleneckBlock(nn.Layer):
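    """Bottleneck residual block: 1x1 reduce -> 3x3 (strided) -> 1x1 expand.

    The third convolution expands the channel count to ``out_channels * 4``.
    When ``shortcut`` is False, a 1x1 ConvBNLayer projects the input so it
    can be added to the main branch; the projection uses vd-mode (average
    pool before the 1x1 conv) whenever the block downsamples and is not the
    very first block of the network.
    """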
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 shortcut=True,
                 if_first=False,
                 name=None):
        super(BottleneckBlock, self).__init__()

        self.conv0 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            act='relu',
            name=name + "_branch2a")
        self.conv1 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=stride,
            act='relu',
            name=name + "_branch2b")
        self.conv2 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels * 4,
            kernel_size=1,
            act=None,
            name=name + "_branch2c")

        if not shortcut:
            self.short = ConvBNLayer(
                in_channels=in_channels,
                out_channels=out_channels * 4,
                kernel_size=1,
                stride=stride,
                is_vd_mode=not if_first and stride[0] != 1,
                name=name + "_branch1")

        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)

        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(x=short, y=conv2)
        y = F.relu(y)
        return y


class BasicBlock(nn.Layer):
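    """Basic residual block (two 3x3 convolutions), used for ResNet-18/34.

    The shortcut follows the same rules as in ``BottleneckBlock``: a 1x1
    projection with optional vd-mode downsampling when shapes differ.
    """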
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 shortcut=True,
                 if_first=False,
                 name=None):
        super(BasicBlock, self).__init__()
        self.stride = stride
        self.conv0 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=stride,
            act='relu',
            name=name + "_branch2a")
        self.conv1 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            act=None,
            name=name + "_branch2b")

        if not shortcut:
            self.short = ConvBNLayer(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=1,
                stride=stride,
                is_vd_mode=not if_first and stride[0] != 1,
                name=name + "_branch1")

        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(x=short, y=conv1)
        y = F.relu(y)
        return y


class ResNet(nn.Layer):
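    """ResNet-vd backbone for text recognition.

    Differences from a standard ResNet:
    - a deep stem of three 3x3 convolutions replaces the single 7x7 stem conv;
    - downsampling inside the residual stages uses stride (2, 1), so the
      feature-map height shrinks while the width (the sequence axis) is kept;
    - a final 2x2 max pool (``out_pool``) reduces the map once more before it
      is handed to the recognition neck.

    ``self.out_channels`` records the channel count of the returned feature
    map so downstream modules can be configured from it.
    """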
    def __init__(self, in_channels=3, layers=50, **kwargs):
        super(ResNet, self).__init__()

        self.layers = layers
        supported_layers = [18, 34, 50, 101, 152, 200]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        elif layers == 200:
            depth = [3, 12, 48, 3]
        num_channels = [64, 256, 512,
                        1024] if layers >= 50 else [64, 64, 128, 256]
        num_filters = [64, 128, 256, 512]

        self.conv1_1 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=32,
            kernel_size=3,
            stride=1,
            act='relu',
            name="conv1_1")
        self.conv1_2 = ConvBNLayer(
            in_channels=32,
            out_channels=32,
            kernel_size=3,
            stride=1,
            act='relu',
            name="conv1_2")
        self.conv1_3 = ConvBNLayer(
            in_channels=32,
            out_channels=64,
            kernel_size=3,
            stride=1,
            act='relu',
            name="conv1_3")
        self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)

        self.block_list = []
        if layers >= 50:
            for block in range(len(depth)):
                shortcut = False
                for i in range(depth[block]):
                    if layers in [101, 152, 200] and block == 2:
                        if i == 0:
                            conv_name = "res" + str(block + 2) + "a"
                        else:
                            conv_name = "res" + str(block + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block + 2) + chr(97 + i)

                    if i == 0 and block != 0:
                        stride = (2, 1)
                    else:
                        stride = (1, 1)
                    bottleneck_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BottleneckBlock(
                            in_channels=num_channels[block]
                            if i == 0 else num_filters[block] * 4,
                            out_channels=num_filters[block],
                            stride=stride,
                            shortcut=shortcut,
                            if_first=block == i == 0,
                            name=conv_name))
                    shortcut = True
                    self.block_list.append(bottleneck_block)
                self.out_channels = num_filters[block] * 4  # bottleneck output has 4x channels
        else:
            for block in range(len(depth)):
                shortcut = False
                for i in range(depth[block]):
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                    if i == 0 and block != 0:
                        stride = (2, 1)
                    else:
                        stride = (1, 1)

                    basic_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BasicBlock(
                            in_channels=num_channels[block]
                            if i == 0 else num_filters[block],
                            out_channels=num_filters[block],
                            stride=stride,
                            shortcut=shortcut,
                            if_first=block == i == 0,
                            name=conv_name))
                    shortcut = True
                    self.block_list.append(basic_block)
                self.out_channels = num_filters[block]
        self.out_pool = nn.MaxPool2D(kernel_size=2, stride=2, padding=0)

    def forward(self, inputs):
        y = self.conv1_1(inputs)
        y = self.conv1_2(y)
        y = self.conv1_3(y)
        y = self.pool2d_max(y)
        for block in self.block_list:
            y = block(y)
        y = self.out_pool(y)
        return y
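

# A minimal usage sketch (illustrative, not part of the original module):
# build the backbone and run a dummy text-image batch through it. The
# 32x100 input size and layers=34 below are assumptions for a typical
# recognition setup, not values required by this file.
if __name__ == "__main__":
    model = ResNet(in_channels=3, layers=34)
    x = paddle.rand([1, 3, 32, 100])
    feats = model(x)
    # Height collapses to 1 while the width survives as the sequence axis,
    # e.g. [1, 512, 1, 25] for this input.
    print(feats.shape)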