# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F

from paddle.vision.ops import DeformConv2D
from paddle.regularizer import L2Decay
from paddle.nn.initializer import Normal, Constant, XavierUniform

__all__ = ["ResNet"]


class DeformableConvV2(nn.Layer):
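    """Modulated deformable convolution (DCNv2).

    A plain ``nn.Conv2D`` (``conv_offset``) predicts 3 * kernel_size**2
    channels per deformable group: two thirds are the x/y sampling offsets
    and the remaining third is a modulation mask. The mask goes through a
    sigmoid, then ``DeformConv2D`` samples the input at the offset locations
    and re-weights each sample by the mask.
    """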
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 lr_scale=1,
                 regularizer=None,
                 skip_quant=False,
                 dcn_bias_regularizer=L2Decay(0.),
                 dcn_bias_lr_scale=2.):
        super(DeformableConvV2, self).__init__()
        self.offset_channel = 2 * kernel_size**2 * groups
        self.mask_channel = kernel_size**2 * groups

        if bias_attr:
            # in the FCOS-DCN head, the bias specifically needs a
            # learning_rate and regularizer
            dcn_bias_attr = ParamAttr(
                initializer=Constant(value=0),
                regularizer=dcn_bias_regularizer,
                learning_rate=dcn_bias_lr_scale)
        else:
            # in the ResNet backbone, no bias is needed
            dcn_bias_attr = False
        self.conv_dcn = DeformConv2D(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2 * dilation,
            dilation=dilation,
            deformable_groups=groups,
            weight_attr=weight_attr,
            bias_attr=dcn_bias_attr)

        if lr_scale == 1 and regularizer is None:
            offset_bias_attr = ParamAttr(initializer=Constant(0.))
        else:
            offset_bias_attr = ParamAttr(
                initializer=Constant(0.),
                learning_rate=lr_scale,
                regularizer=regularizer)
        self.conv_offset = nn.Conv2D(
            in_channels,
            groups * 3 * kernel_size**2,
            kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            weight_attr=ParamAttr(initializer=Constant(0.0)),
            bias_attr=offset_bias_attr)
        if skip_quant:
            self.conv_offset.skip_quant = True

    def forward(self, x):
        offset_mask = self.conv_offset(x)
        offset, mask = paddle.split(
            offset_mask,
            num_or_sections=[self.offset_channel, self.mask_channel],
            axis=1)
        mask = F.sigmoid(mask)
        y = self.conv_dcn(x, offset, mask=mask)
        return y


class ConvBNLayer(nn.Layer):
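    """Conv + batch norm block used throughout the backbone.

    ``act`` is fused into ``nn.BatchNorm``. When ``is_dcn=True`` the
    convolution is a ``DeformableConvV2`` instead of a plain ``nn.Conv2D``.
    When ``is_vd_mode=True`` a stride-2 average pooling runs before the
    convolution; this is the ResNet-vd trick for downsampling shortcut
    branches.
    """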
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 is_vd_mode=False,
                 act=None,
                 is_dcn=False):
        super(ConvBNLayer, self).__init__()

        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = nn.AvgPool2D(
            kernel_size=2, stride=2, padding=0, ceil_mode=True)
        if not is_dcn:
            self._conv = nn.Conv2D(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=(kernel_size - 1) // 2,
                groups=groups,
                bias_attr=False)
        else:
            self._conv = DeformableConvV2(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=(kernel_size - 1) // 2,
                # NOTE: deformable groups are hard-coded to 2 here; the
                # incoming `groups` argument is not forwarded.
                groups=2,
                bias_attr=False)
        self._batch_norm = nn.BatchNorm(out_channels, act=act)

    def forward(self, inputs):
        if self.is_vd_mode:
            inputs = self._pool2d_avg(inputs)
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y


class BottleneckBlock(nn.Layer):
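    """Residual bottleneck (1x1 -> 3x3 -> 1x1) with out_channels * 4 outputs.

    The stride and the optional deformable convolution (``is_dcn``) sit on
    the 3x3 conv. When ``shortcut`` is False a 1x1 projection is added; it
    downsamples via average pooling (vd mode) except in the very first block
    of the network (``if_first``).
    """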
    def __init__(
            self,
            in_channels,
            out_channels,
            stride,
            shortcut=True,
            if_first=False,
            is_dcn=False, ):
        super(BottleneckBlock, self).__init__()

        self.conv0 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            act='relu')
        self.conv1 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=stride,
            act='relu',
            is_dcn=is_dcn)
        self.conv2 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels * 4,
            kernel_size=1,
            act=None)

        if not shortcut:
            self.short = ConvBNLayer(
                in_channels=in_channels,
                out_channels=out_channels * 4,
                kernel_size=1,
                stride=1,
                is_vd_mode=not if_first)

        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(x=short, y=conv2)
        y = F.relu(y)
        return y


class BasicBlock(nn.Layer):
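    """Residual basic block (two 3x3 convs) for the 18/34-layer variants.

    Shortcut handling mirrors ``BottleneckBlock``: a 1x1 projection with
    vd-style average pooling is used when ``shortcut`` is False, skipping
    the average pooling for the first block of the network (``if_first``).
    """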
    def __init__(
            self,
            in_channels,
            out_channels,
            stride,
            shortcut=True,
            if_first=False, ):
        super(BasicBlock, self).__init__()
        self.stride = stride
        self.conv0 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=stride,
            act='relu')
        self.conv1 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            act=None)

        if not shortcut:
            self.short = ConvBNLayer(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=1,
                stride=1,
                is_vd_mode=not if_first)

        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(x=short, y=conv1)
        y = F.relu(y)
        return y


class ResNet(nn.Layer):
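    """ResNet-vd backbone that returns multi-scale feature maps.

    Compared with vanilla ResNet, the 7x7 stem is replaced by three 3x3
    convs (conv1_1..conv1_3) and downsampling shortcuts use average pooling.
    ``dcn_stage`` is a list of four booleans enabling DCNv2 per stage (only
    effective for layers >= 50), and ``out_indices`` selects which stage
    outputs ``forward`` returns. Outputs have strides 4, 8, 16 and 32, with
    channel counts recorded in ``self.out_channels``.
    """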
    def __init__(self,
                 in_channels=3,
                 layers=50,
                 dcn_stage=None,
                 out_indices=None,
                 **kwargs):
        super(ResNet, self).__init__()

        self.layers = layers
        supported_layers = [18, 34, 50, 101, 152, 200]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

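        # number of blocks per stage for each supported depth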
        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        elif layers == 200:
            depth = [3, 12, 48, 3]
        num_channels = [64, 256, 512,
                        1024] if layers >= 50 else [64, 64, 128, 256]
        num_filters = [64, 128, 256, 512]

        self.dcn_stage = dcn_stage if dcn_stage is not None else [
            False, False, False, False
        ]
        self.out_indices = out_indices if out_indices is not None else [
            0, 1, 2, 3
        ]

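        # vd deep stem: three 3x3 convs (total stride 2, ending at 64
        # channels) replace the single 7x7 conv of the original ResNet stem.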
        self.conv1_1 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=32,
            kernel_size=3,
            stride=2,
            act='relu')
        self.conv1_2 = ConvBNLayer(
            in_channels=32,
            out_channels=32,
            kernel_size=3,
            stride=1,
            act='relu')
        self.conv1_3 = ConvBNLayer(
            in_channels=32,
            out_channels=64,
            kernel_size=3,
            stride=1,
            act='relu')
        self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)

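        # Build the four residual stages: bottleneck blocks for layers >= 50,
        # basic blocks otherwise. Only stages listed in out_indices contribute
        # to self.out_channels and to the outputs of forward().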
        self.stages = []
        self.out_channels = []
        if layers >= 50:
            for block in range(len(depth)):
                block_list = []
                shortcut = False
                is_dcn = self.dcn_stage[block]
                for i in range(depth[block]):
                    bottleneck_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BottleneckBlock(
                            in_channels=num_channels[block]
                            if i == 0 else num_filters[block] * 4,
                            out_channels=num_filters[block],
                            stride=2 if i == 0 and block != 0 else 1,
                            shortcut=shortcut,
                            if_first=block == i == 0,
                            is_dcn=is_dcn))
                    shortcut = True
                    block_list.append(bottleneck_block)
                if block in self.out_indices:
                    self.out_channels.append(num_filters[block] * 4)
                self.stages.append(nn.Sequential(*block_list))
        else:
            for block in range(len(depth)):
                block_list = []
                shortcut = False
                # NOTE: dcn_stage is ignored here; BasicBlock has no DCN
                # support, so DCN only takes effect for layers >= 50.
                for i in range(depth[block]):
                    basic_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BasicBlock(
                            in_channels=num_channels[block]
                            if i == 0 else num_filters[block],
                            out_channels=num_filters[block],
                            stride=2 if i == 0 and block != 0 else 1,
                            shortcut=shortcut,
                            if_first=block == i == 0))
                    shortcut = True
                    block_list.append(basic_block)
                if block in self.out_indices:
                    self.out_channels.append(num_filters[block])
                self.stages.append(nn.Sequential(*block_list))

    def forward(self, inputs):
        y = self.conv1_1(inputs)
        y = self.conv1_2(y)
        y = self.conv1_3(y)
        y = self.pool2d_max(y)
        out = []
        for i, block in enumerate(self.stages):
            y = block(y)
            if i in self.out_indices:
                out.append(y)
        return out
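

if __name__ == "__main__":
    # A minimal smoke-test sketch, assuming a working Paddle runtime: build a
    # ResNet-50-vd backbone and print each returned feature map's shape
    # (strides 4, 8, 16 and 32 relative to the input).
    model = ResNet(in_channels=3, layers=50)
    x = paddle.rand([1, 3, 224, 224])
    for feat, ch in zip(model(x), model.out_channels):
        print(feat.shape, "channels:", ch)  # first: [1, 256, 56, 56], 256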