dcnv3.py 12.2 KB
Newer Older
PRC-Huang's avatar
PRC-Huang committed
1
2
3
4
5
6
# --------------------------------------------------------
# InternImage
# Copyright (c) 2022 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

zhe chen's avatar
zhe chen committed
7
from __future__ import absolute_import, division, print_function
PRC-Huang's avatar
PRC-Huang committed
8
9

import warnings
zhe chen's avatar
zhe chen committed
10

11
import torch
PRC-Huang's avatar
PRC-Huang committed
12
import torch.nn.functional as F
zhe chen's avatar
zhe chen committed
13
14
15
from torch import nn
from torch.nn.init import constant_, xavier_uniform_

PRC-Huang's avatar
PRC-Huang committed
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from ..functions import DCNv3Function, dcnv3_core_pytorch


class to_channels_first(nn.Module):
    """Permute a channels-last tensor (N, H, W, C) to channels-first (N, C, H, W)."""

    def forward(self, x):
        # Stateless module: no __init__ needed beyond nn.Module's default.
        return torch.permute(x, (0, 3, 1, 2))


class to_channels_last(nn.Module):
    """Permute a channels-first tensor (N, C, H, W) to channels-last (N, H, W, C)."""

    def forward(self, x):
        # Stateless module: no __init__ needed beyond nn.Module's default.
        return torch.permute(x, (0, 2, 3, 1))


def build_norm_layer(dim,
                     norm_layer,
                     in_format='channels_last',
                     out_format='channels_last',
                     eps=1e-6):
    """Build a normalization layer bracketed by any layout conversions it needs.

    BatchNorm2d natively consumes channels-first tensors and LayerNorm
    channels-last ones, so permute modules are inserted whenever the requested
    in/out formats disagree with the norm's native layout.

    :param dim: number of normalized channels
    :param norm_layer: 'BN' or 'LN'
    :param in_format: 'channels_last' or 'channels_first' layout of the input
    :param out_format: 'channels_last' or 'channels_first' layout of the output
    :param eps: epsilon forwarded to LayerNorm only
    :raises NotImplementedError: for any other norm_layer name
    :return: nn.Sequential applying (optional permute, norm, optional permute)
    """
    if norm_layer == 'BN':
        pre = to_channels_first() if in_format == 'channels_last' else None
        core = nn.BatchNorm2d(dim)
        post = to_channels_last() if out_format == 'channels_last' else None
    elif norm_layer == 'LN':
        pre = to_channels_last() if in_format == 'channels_first' else None
        core = nn.LayerNorm(dim, eps=eps)
        post = to_channels_first() if out_format == 'channels_first' else None
    else:
        raise NotImplementedError(
            f'build_norm_layer does not support {norm_layer}')
    return nn.Sequential(*[m for m in (pre, core, post) if m is not None])


def build_act_layer(act_layer):
    """Instantiate an activation module by name.

    :param act_layer: 'ReLU', 'SiLU' (both in-place) or 'GELU'
    :raises NotImplementedError: for any other name
    """
    factories = {
        'ReLU': lambda: nn.ReLU(inplace=True),
        'SiLU': lambda: nn.SiLU(inplace=True),
        'GELU': nn.GELU,
    }
    factory = factories.get(act_layer)
    if factory is None:
        raise NotImplementedError(f'build_act_layer does not support {act_layer}')
    return factory()


def _is_power_of_2(n):
    if (not isinstance(n, int)) or (n < 0):
        raise ValueError(
zhe chen's avatar
zhe chen committed
75
            'invalid input for _is_power_of_2: {} (type: {})'.format(n, type(n)))
PRC-Huang's avatar
PRC-Huang committed
76

77
    return (n & (n - 1) == 0) and n != 0
PRC-Huang's avatar
PRC-Huang committed
78
79


80
81
82
83
84
85
86
87
88
class CenterFeatureScaleModule(nn.Module):
    """Project per-pixel queries to a per-group blending scale in (0, 1)."""

    def forward(self,
                query,
                center_feature_scale_proj_weight,
                center_feature_scale_proj_bias):
        # Linear projection with externally supplied weight/bias, then a
        # sigmoid squashes each group's logit into a (0, 1) coefficient.
        logits = F.linear(query,
                          weight=center_feature_scale_proj_weight,
                          bias=center_feature_scale_proj_bias)
        return torch.sigmoid(logits)
89

90

PRC-Huang's avatar
PRC-Huang committed
91
92
class DCNv3_pytorch(nn.Module):
    """Deformable Convolution v3, pure-PyTorch reference implementation.

    Input and output are channels-last tensors of shape (N, H, W, C).
    A depthwise conv branch predicts per-group sampling offsets and softmax
    modulation masks, which `dcnv3_core_pytorch` applies to the projected
    input.
    """

    def __init__(
            self,
            channels=64,
            kernel_size=3,
            dw_kernel_size=None,
            stride=1,
            pad=1,
            dilation=1,
            group=4,
            offset_scale=1.0,
            act_layer='GELU',
            norm_layer='LN',
            center_feature_scale=False,
            remove_center=False,
    ):
        """
        DCNv3 Module
        :param channels: embedding channels; must be divisible by group
        :param kernel_size: sampling kernel size
        :param dw_kernel_size: depthwise conv kernel size (defaults to kernel_size)
        :param stride: sampling stride
        :param pad: sampling padding
        :param dilation: sampling dilation
        :param group: number of deformable groups
        :param offset_scale: multiplier applied to the predicted offsets
        :param act_layer: activation for the depthwise branch ('ReLU'/'SiLU'/'GELU')
        :param norm_layer: norm for the depthwise branch ('BN'/'LN')
        :param center_feature_scale: learn a per-group blend with the input projection
        :param remove_center: drop the kernel's center sampling point (odd kernels only)
        :raises ValueError: if channels is not divisible by group, or if
            remove_center is used with an even kernel size
        """
        super().__init__()
        if channels % group != 0:
            raise ValueError(
                f'channels must be divisible by group, but got {channels} and {group}')
        _d_per_group = channels // group
        dw_kernel_size = dw_kernel_size if dw_kernel_size is not None else kernel_size
        # Fail fast, matching the CUDA-backed DCNv3 class: with the center
        # point removed, an even kernel has no well-defined geometry.
        if remove_center and kernel_size % 2 == 0:
            raise ValueError(
                'remove_center is only compatible with odd kernel size.')
        # you'd better set _d_per_group to a power of 2 which is more efficient in our CUDA implementation
        if not _is_power_of_2(_d_per_group):
            warnings.warn(
                "You'd better set channels in DCNv3 to make the dimension of each attention head a power of 2 "
                'which is more efficient in our CUDA implementation.')

        self.offset_scale = offset_scale
        self.channels = channels
        self.kernel_size = kernel_size
        self.dw_kernel_size = dw_kernel_size
        self.stride = stride
        self.dilation = dilation
        self.pad = pad
        self.group = group
        self.group_channels = channels // group
        self.center_feature_scale = center_feature_scale
        self.remove_center = int(remove_center)

        # Depthwise conv + norm + activation; the norm layer converts the
        # feature map from channels-first to channels-last, so the offset and
        # mask linear layers below operate on (N, H, W, C).
        self.dw_conv = nn.Sequential(
            nn.Conv2d(
                channels,
                channels,
                kernel_size=dw_kernel_size,
                stride=1,
                padding=(dw_kernel_size - 1) // 2,
                groups=channels),
            build_norm_layer(
                channels,
                norm_layer,
                'channels_first',
                'channels_last'),
            build_act_layer(act_layer))
        # Two offset coordinates (x, y) per sampling point per group.
        self.offset = nn.Linear(
            channels,
            group * (kernel_size * kernel_size - remove_center) * 2)
        # One modulation scalar per sampling point per group.
        self.mask = nn.Linear(
            channels,
            group * (kernel_size * kernel_size - remove_center))
        self.input_proj = nn.Linear(channels, channels)
        self.output_proj = nn.Linear(channels, channels)
        self._reset_parameters()

        if center_feature_scale:
            self.center_feature_scale_proj_weight = nn.Parameter(
                torch.zeros((group, channels), dtype=torch.float))
            self.center_feature_scale_proj_bias = nn.Parameter(
                torch.tensor(0.0, dtype=torch.float).view((1,)).repeat(group, ))
            self.center_feature_scale_module = CenterFeatureScaleModule()

    def _reset_parameters(self):
        """Zero-init offsets/masks (module starts as a plain conv); Xavier for projections."""
        constant_(self.offset.weight.data, 0.)
        constant_(self.offset.bias.data, 0.)
        constant_(self.mask.weight.data, 0.)
        constant_(self.mask.bias.data, 0.)
        xavier_uniform_(self.input_proj.weight.data)
        constant_(self.input_proj.bias.data, 0.)
        xavier_uniform_(self.output_proj.weight.data)
        constant_(self.output_proj.bias.data, 0.)

    def forward(self, input):
        """
        :param input                       (N, H, W, C)
        :return output                     (N, H, W, C)
        """
        N, H, W, _ = input.shape

        x = self.input_proj(input)
        x_proj = x  # kept for the optional center-feature-scale blend

        x1 = input.permute(0, 3, 1, 2)
        x1 = self.dw_conv(x1)  # channels-last after the norm conversion
        offset = self.offset(x1)
        # Softmax normalizes the modulation weights over the sampling points
        # of each group before flattening back to (N, H, W, group*points).
        mask = self.mask(x1).reshape(N, H, W, self.group, -1)
        mask = F.softmax(mask, -1).reshape(N, H, W, -1)

        x = dcnv3_core_pytorch(
            x, offset, mask,
            self.kernel_size, self.kernel_size,
            self.stride, self.stride,
            self.pad, self.pad,
            self.dilation, self.dilation,
            self.group, self.group_channels,
            self.offset_scale, self.remove_center)

        if self.center_feature_scale:
            center_feature_scale = self.center_feature_scale_module(
                x1, self.center_feature_scale_proj_weight, self.center_feature_scale_proj_bias)
            # N, H, W, groups -> N, H, W, groups, 1 -> N, H, W, groups, _d_per_group -> N, H, W, channels
            center_feature_scale = center_feature_scale[..., None].repeat(
                1, 1, 1, 1, self.channels // self.group).flatten(-2)
            x = x * (1 - center_feature_scale) + x_proj * center_feature_scale

        x = self.output_proj(x)

        return x


class DCNv3(nn.Module):
    """Deformable Convolution v3 backed by the custom CUDA op (DCNv3Function).

    Input and output are channels-last tensors of shape (N, H, W, C).
    A depthwise conv branch predicts per-group sampling offsets and softmax
    modulation masks, which the fused CUDA kernel applies to the projected
    input.
    """

    def __init__(
        self,
        channels=64,
        kernel_size=3,
        dw_kernel_size=None,
        stride=1,
        pad=1,
        dilation=1,
        group=4,
        offset_scale=1.0,
        act_layer='GELU',
        norm_layer='LN',
        center_feature_scale=False,
        remove_center=False,
    ):
        """
        DCNv3 Module
        :param channels: embedding channels; must be divisible by group
        :param kernel_size: sampling kernel size
        :param dw_kernel_size: depthwise conv kernel size (defaults to kernel_size)
        :param stride: sampling stride
        :param pad: sampling padding
        :param dilation: sampling dilation
        :param group: number of deformable groups
        :param offset_scale: multiplier applied to the predicted offsets
        :param act_layer: activation for the depthwise branch ('ReLU'/'SiLU'/'GELU')
        :param norm_layer: norm for the depthwise branch ('BN'/'LN')
        :param center_feature_scale: learn a per-group blend with the input projection
        :param remove_center: drop the kernel's center sampling point (odd kernels only)
        :raises ValueError: if channels is not divisible by group, or if
            remove_center is used with an even kernel size
        """
        super().__init__()
        if channels % group != 0:
            raise ValueError(
                f'channels must be divisible by group, but got {channels} and {group}')
        _d_per_group = channels // group
        dw_kernel_size = dw_kernel_size if dw_kernel_size is not None else kernel_size
        # Validate before doing any work: without a center point an even
        # kernel has ambiguous geometry.
        if remove_center and kernel_size % 2 == 0:
            raise ValueError(
                'remove_center is only compatible with odd kernel size.')
        # you'd better set _d_per_group to a power of 2 which is more efficient in our CUDA implementation
        if not _is_power_of_2(_d_per_group):
            warnings.warn(
                "You'd better set channels in DCNv3 to make the dimension of each attention head a power of 2 "
                'which is more efficient in our CUDA implementation.')

        self.offset_scale = offset_scale
        self.channels = channels
        self.kernel_size = kernel_size
        self.dw_kernel_size = dw_kernel_size
        self.stride = stride
        self.dilation = dilation
        self.pad = pad
        self.group = group
        self.group_channels = channels // group
        self.center_feature_scale = center_feature_scale
        self.remove_center = int(remove_center)

        # Depthwise conv + norm + activation; the norm layer converts the
        # feature map from channels-first to channels-last, so the offset and
        # mask linear layers below operate on (N, H, W, C).
        self.dw_conv = nn.Sequential(
            nn.Conv2d(
                channels,
                channels,
                kernel_size=dw_kernel_size,
                stride=1,
                padding=(dw_kernel_size - 1) // 2,
                groups=channels),
            build_norm_layer(
                channels,
                norm_layer,
                'channels_first',
                'channels_last'),
            build_act_layer(act_layer))
        # Two offset coordinates (x, y) per sampling point per group.
        self.offset = nn.Linear(
            channels,
            group * (kernel_size * kernel_size - remove_center) * 2)
        # One modulation scalar per sampling point per group.
        self.mask = nn.Linear(
            channels,
            group * (kernel_size * kernel_size - remove_center))
        self.input_proj = nn.Linear(channels, channels)
        self.output_proj = nn.Linear(channels, channels)
        self._reset_parameters()

        if center_feature_scale:
            self.center_feature_scale_proj_weight = nn.Parameter(
                torch.zeros((group, channels), dtype=torch.float))
            self.center_feature_scale_proj_bias = nn.Parameter(
                torch.tensor(0.0, dtype=torch.float).view((1,)).repeat(group, ))
            self.center_feature_scale_module = CenterFeatureScaleModule()

    def _reset_parameters(self):
        """Zero-init offsets/masks (module starts as a plain conv); Xavier for projections."""
        constant_(self.offset.weight.data, 0.)
        constant_(self.offset.bias.data, 0.)
        constant_(self.mask.weight.data, 0.)
        constant_(self.mask.bias.data, 0.)
        xavier_uniform_(self.input_proj.weight.data)
        constant_(self.input_proj.bias.data, 0.)
        xavier_uniform_(self.output_proj.weight.data)
        constant_(self.output_proj.bias.data, 0.)

    def forward(self, input):
        """
        :param input                       (N, H, W, C)
        :return output                     (N, H, W, C)
        """
        N, H, W, _ = input.shape

        x = self.input_proj(input)
        x_proj = x  # kept for the optional center-feature-scale blend
        dtype = x.dtype

        x1 = input.permute(0, 3, 1, 2)
        x1 = self.dw_conv(x1)  # channels-last after the norm conversion
        offset = self.offset(x1)
        # Softmax normalizes the modulation weights over the sampling points
        # of each group; cast back to the input dtype for the CUDA kernel.
        mask = self.mask(x1).reshape(N, H, W, self.group, -1)
        mask = F.softmax(mask, -1)
        mask = mask.reshape(N, H, W, -1).type(dtype)

        x = DCNv3Function.apply(
            x, offset, mask,
            self.kernel_size, self.kernel_size,
            self.stride, self.stride,
            self.pad, self.pad,
            self.dilation, self.dilation,
            self.group, self.group_channels,
            self.offset_scale,
            256,  # im2col_step used by the CUDA kernel
            self.remove_center)

        if self.center_feature_scale:
            center_feature_scale = self.center_feature_scale_module(
                x1, self.center_feature_scale_proj_weight, self.center_feature_scale_proj_bias)
            # N, H, W, groups -> N, H, W, groups, 1 -> N, H, W, groups, _d_per_group -> N, H, W, channels
            center_feature_scale = center_feature_scale[..., None].repeat(
                1, 1, 1, 1, self.channels // self.group).flatten(-2)
            x = x * (1 - center_feature_scale) + x_proj * center_feature_scale

        x = self.output_proj(x)

        return x